Mirror of https://github.com/hauler-dev/hauler.git (synced 2026-02-20 04:49:51 +00:00)

Compare commits: v1.2.0-dev...v1.3.2 (47 commits)
Commits (SHA1):

15867e84ad
c5da018450
5edc8802ee
a3d62b204f
d85a1b0775
ea10bc0256
1aea670588
f1a632a207
802e062f47
d227e1f18f
33a9bb3f78
344c008607
09a149dab6
f7f1e2db8f
0fafca87f9
38e676e934
369c85bab9
acbd1f1b6a
3e44c53b75
062bb3ff2c
c8b4e80371
d86957bf20
4a6fc8cec2
e089c31879
b7b599e6ed
ea53002f3a
4d0f779ae6
4d0b407452
3b96a95a94
f9a188259f
5021f3ab6b
db065a1088
01bf58de03
38b979d0c5
7de20a1f15
088fde5aa9
eb275b9690
7d28df1949
08f566fb28
c465d2c143
39325585eb
535a82c1b5
53cf953750
ff144b1180
938914ba5c
603249dea9
37032f5379
.github/workflows/pages.yaml (vendored) — 27 changed lines
@@ -1,40 +1,47 @@
# Simple workflow for deploying static content to GitHub Pages
name: 📋
name: Pages Workflow

on:
workflow_dispatch:
push:
branches:
- main
workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false

jobs:
# Single deploy job since we're just deploying
deploy:
deploy-pages:
name: Deploy GitHub Pages
runs-on: ubuntu-latest
timeout-minutes: 30
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Configure Git
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"

- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact

- name: Upload Pages Artifacts
uses: actions/upload-pages-artifact@v3
with:
path: './static'

- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
.github/workflows/testdata.yaml (vendored, new file) — 37 added lines
@@ -0,0 +1,37 @@
name: Refresh Hauler Testdata

on:
  workflow_dispatch:

jobs:
  refresh-testdata:
    name: Refresh Hauler Testdata
    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4

      - name: Fetch Hauler Binary
        run: curl -sfL https://get.hauler.dev | bash

      - name: Login to GitHub Container Registry and Docker Hub Container Registry
        run: |
          hauler login ghcr.io --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }}
          hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Process Testdata Manifests
        run: |
          for manifest in testdata/*.yaml; do
            echo "Processing $manifest..."
            name=$(basename "$manifest" .yaml)
            hauler store sync --filename "$manifest"
          done

      - name: Push Store Contents to Hauler-Dev GitHub Container Registry
        run: |
          hauler store copy registry://ghcr.io/${{ github.repository_owner }}

      - name: Verify Hauler Store Contents
        run: hauler store info
.github/workflows/tests.yaml (vendored) — 38 changed lines
@@ -114,12 +114,8 @@ jobs:
- name: Verify - hauler login
run: |
hauler login --help
hauler login docker.io --username bob --password haulin
echo "hauler" | hauler login docker.io -u bob --password-stdin

- name: Remove Hauler Store Credentials
run: |
rm -rf ~/.docker/config.json
hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
echo ${{ secrets.GITHUB_TOKEN }} | hauler login ghcr.io --username ${{ github.repository_owner }} --password-stdin

- name: Verify - hauler store
run: |
@@ -166,11 +162,11 @@ jobs:
run: |
hauler store add image --help
# verify via image reference
hauler store add image busybox
hauler store add image ghcr.io/hauler-dev/library/busybox
# verify via image reference with version and platform
hauler store add image busybox:stable --platform linux/amd64
hauler store add image ghcr.io/hauler-dev/library/busybox:stable --platform linux/amd64
# verify via image reference with full reference
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
hauler store add image ghcr.io/hauler-dev/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
# verify via the hauler store contents
hauler store info

@@ -222,11 +218,13 @@ jobs:
run: |
hauler store load --help
# verify via load
hauler store load haul.tar.zst
hauler store load
# verify via load with multiple files
hauler store load --filename haul.tar.zst --filename store.tar.zst
# verify via load with filename and temp directory
hauler store load store.tar.zst --tempdir /opt
hauler store load --filename store.tar.zst --tempdir /opt
# verify via load with filename and platform (amd64)
hauler store load store-amd64.tar.zst
hauler store load --filename store-amd64.tar.zst

- name: Verify Hauler Store Contents
run: |
@@ -255,10 +253,10 @@ jobs:
- name: Verify - hauler store sync
run: |
hauler store sync --help
# download local helm repository
curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.2/rancher-cluster-templates-0.5.2.tgz
# verify via sync
hauler store sync --files testdata/hauler-manifest-pipeline.yaml
hauler store sync --filename testdata/hauler-manifest-pipeline.yaml
# verify via sync with multiple files
hauler store sync --filename testdata/hauler-manifest-pipeline.yaml --filename testdata/hauler-manifest.yaml
# need more tests here

- name: Verify - hauler store serve
@@ -331,3 +329,13 @@ jobs:
with:
name: hauler-report
path: hauler-report.txt

- name: Verify - hauler logout
run: |
hauler logout --help
hauler logout docker.io
hauler logout ghcr.io

- name: Remove Hauler Store Credentials
run: |
rm -rf ~/.docker/config.json
.goreleaser.yaml — 112 changed lines
@@ -39,83 +39,53 @@ changelog:
disable: false
use: git

brews:
homebrew_casks:
- name: hauler
repository:
owner: hauler-dev
name: homebrew-tap
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
directory: Formula
description: "Hauler CLI"
description: "Hauler: Airgap Swiss Army Knife"

dockers:
- id: hauler-amd64
goos: linux
goarch: amd64
use: buildx
dockers_v2:
- id: hauler
dockerfile: Dockerfile
build_flag_templates:
- "--platform=linux/amd64"
flags:
- "--target=release"
image_templates:
- "docker.io/hauler/hauler-amd64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
- id: hauler-arm64
goos: linux
goarch: arm64
use: buildx
dockerfile: Dockerfile
build_flag_templates:
- "--platform=linux/arm64"
- "--target=release"
image_templates:
- "docker.io/hauler/hauler-arm64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
- id: hauler-debug-amd64
goos: linux
goarch: amd64
use: buildx
dockerfile: Dockerfile
build_flag_templates:
- "--platform=linux/amd64"
- "--target=debug"
image_templates:
- "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
- id: hauler-debug-arm64
goos: linux
goarch: arm64
use: buildx
dockerfile: Dockerfile
build_flag_templates:
- "--platform=linux/arm64"
- "--target=debug"
image_templates:
- "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"
images:
- docker.io/hauler/hauler
- ghcr.io/hauler-dev/hauler
tags:
- "{{ .Version }}"
platforms:
- linux/amd64
- linux/arm64
labels:
"classification": "UNCLASSIFIED"
"org.opencontainers.image.created": "{{.Date}}"
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
"org.opencontainers.image.name": "{{.ProjectName}}-debug"
"org.opencontainers.image.revision": "{{.FullCommit}}"
"org.opencontainers.image.source": "{{.GitURL}}"
"org.opencontainers.image.version": "{{.Version}}"

docker_manifests:
- id: hauler-docker
use: docker
name_template: "docker.io/hauler/hauler:{{ .Version }}"
image_templates:
- "docker.io/hauler/hauler-amd64:{{ .Version }}"
- "docker.io/hauler/hauler-arm64:{{ .Version }}"
- id: hauler-ghcr
use: docker
name_template: "ghcr.io/hauler-dev/hauler:{{ .Version }}"
image_templates:
- "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
- id: hauler-debug-docker
use: docker
name_template: "docker.io/hauler/hauler-debug:{{ .Version }}"
image_templates:
- "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
- "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
- id: hauler-debug-ghcr
use: docker
name_template: "ghcr.io/hauler-dev/hauler-debug:{{ .Version }}"
image_templates:
- "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
- "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"
- id: hauler-debug
dockerfile: Dockerfile
flags:
- "--target=debug"
images:
- docker.io/hauler/hauler-debug
- ghcr.io/hauler-dev/hauler-debug
tags:
- "{{ .Version }}"
platforms:
- linux/amd64
- linux/arm64
labels:
"classification": "UNCLASSIFIED"
"org.opencontainers.image.created": "{{.Date}}"
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
"org.opencontainers.image.name": "{{.ProjectName}}-debug"
"org.opencontainers.image.revision": "{{.FullCommit}}"
"org.opencontainers.image.source": "{{.GitURL}}"
"org.opencontainers.image.version": "{{.Version}}"
Dockerfile

@@ -1,8 +1,9 @@
# builder stage
FROM registry.suse.com/bci/bci-base:15.5 AS builder
FROM registry.suse.com/bci/bci-base:15.7 AS builder
ARG TARGETPLATFORM

# fetched from goreleaser build proccess
COPY hauler /hauler
# fetched from goreleaser build process
COPY $TARGETPLATFORM/hauler /hauler

RUN echo "hauler:x:1001:1001::/home/hauler:" > /etc/passwd \
&& echo "hauler:x:1001:hauler" > /etc/group \
@@ -39,4 +40,4 @@ COPY --from=builder --chown=hauler:hauler /hauler /usr/local/bin/hauler
RUN apk --no-cache add curl

USER hauler
WORKDIR /home/hauler
WORKDIR /home/hauler
README.md — 21 changed lines
@@ -10,6 +10,27 @@

For more information, please review the **[Hauler Documentation](https://hauler.dev)!**

## Recent Changes

### In Hauler v1.2.0...

- Upgraded the `apiVersion` to `v1` from `v1alpha1`
  - Users are able to use `v1` and `v1alpha1`, but `v1alpha1` is now deprecated and will be removed in a future release. We will update the community when we fully deprecate and remove the functionality of `v1alpha1`
  - Users will see logging notices when using the old `apiVersion` such as...
    - `!!! DEPRECATION WARNING !!! apiVersion [v1alpha1] will be removed in a future release !!! DEPRECATION WARNING !!!`
---
- Updated the behavior of `hauler store load` to default to loading a `haul` with the name of `haul.tar.zst` and requires the flag of `--filename/-f` to load a `haul` with a different name
  - Users can load multiple `hauls` by specifying multiple flags of `--filename/-f`
  - updated command usage: `hauler store load --filename hauling-hauls.tar.zst`
  - previous command usage (do not use): `hauler store load hauling-hauls.tar.zst`
---
- Updated the behavior of `hauler store sync` to default to syncing a `manifest` with the name of `hauler-manifest.yaml` and requires the flag of `--filename/-f` to sync a `manifest` with a different name
  - Users can sync multiple `manifests` by specifying multiple flags of `--filename/-f`
  - updated command usage: `hauler store sync --filename hauling-hauls-manifest.yaml`
  - previous command usage (do not use): `hauler store sync --files hauling-hauls-manifest.yaml`
---
Please review the documentation for any additional [Known Limits, Issues, and Notices](https://docs.hauler.dev/docs/known-limits)!

## Installation

### Linux/Darwin
@@ -30,6 +30,7 @@ func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
flags.AddRootFlags(cmd, ro)

cmd.AddCommand(cranecmd.NewCmdAuthLogin("hauler"))
cmd.AddCommand(cranecmd.NewCmdAuthLogout("hauler"))
addStore(cmd, ro)
addVersion(cmd, ro)
addCompletion(cmd, ro)

@@ -67,6 +67,17 @@ func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
cmd := &cobra.Command{
Use: "sync",
Short: "Sync content to the content store",
Args: cobra.ExactArgs(0),
PreRunE: func(cmd *cobra.Command, args []string) error {
// Check if the products flag was passed
if len(o.Products) > 0 {
// Only clear the default if the user did NOT explicitly set --filename
if !cmd.Flags().Changed("filename") {
o.FileName = []string{}
}
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
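The PreRunE hook above clears the default value of --filename whenever --products is supplied, using pflag's Changed to tell an explicitly set flag apart from its default. A minimal standalone sketch of that pattern (command and flag names here are illustrative, not taken from the diff):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var filenames []string

	cmd := &cobra.Command{
		Use: "sync",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// Flags().Changed reports whether the user set the flag explicitly,
			// so the default can be dropped without overriding a real value.
			if !cmd.Flags().Changed("filename") {
				filenames = nil
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("filenames:", filenames)
			return nil
		},
	}
	cmd.Flags().StringSliceVarP(&filenames, "filename", "f", []string{"default.yaml"}, "manifest(s) to sync")
	_ = cmd.Execute()
}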
@@ -89,7 +100,7 @@ func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
cmd := &cobra.Command{
Use: "load",
Short: "Load a content store from a store archive",
Args: cobra.MinimumNArgs(1),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()

@@ -99,7 +110,7 @@ func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
}
_ = s

return store.LoadCmd(ctx, o, args...)
return store.LoadCmd(ctx, o, rso, ro)
},
}
o.AddFlags(cmd)
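With Args changed to cobra.ExactArgs(0), the archive names now come only from the repeated --filename/-f flag rather than positional arguments. A short sketch of how a string-slice flag accumulates repeated values (the flag wiring shown is an assumption, not copied from LoadOpts):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var files []string
	fs := pflag.NewFlagSet("load", pflag.ContinueOnError)
	// The first explicit --filename replaces the default; each further
	// occurrence appends, so two flags yield both entries.
	fs.StringSliceVarP(&files, "filename", "f", []string{"haul.tar.zst"}, "haul(s) to load")

	if err := fs.Parse([]string{"--filename", "a.tar.zst", "--filename", "b.tar.zst"}); err != nil {
		panic(err)
	}
	fmt.Println(files) // [a.tar.zst b.tar.zst]
}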
@@ -123,7 +134,6 @@ func addStoreServe(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comma
|
||||
return cmd
|
||||
}
|
||||
|
||||
// RegistryCmd serves the registry
|
||||
func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.ServeRegistryOpts{StoreRootOpts: rso}
|
||||
|
||||
@@ -147,7 +157,6 @@ func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cob
|
||||
return cmd
|
||||
}
|
||||
|
||||
// FileServerCmd serves the file server
|
||||
func addStoreServeFiles(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.ServeFilesOpts{StoreRootOpts: rso}
|
||||
|
||||
@@ -187,7 +196,7 @@ func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
|
||||
}
|
||||
_ = s
|
||||
|
||||
return store.SaveCmd(ctx, o, o.FileName)
|
||||
return store.SaveCmd(ctx, o, rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
@@ -311,13 +320,14 @@ hauler store add image busybox
|
||||
hauler store add image library/busybox:stable
|
||||
|
||||
# fetch image with full image reference and specific platform
|
||||
hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.0.7 --platform linux/amd64
|
||||
hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.2.0 --platform linux/amd64
|
||||
|
||||
# fetch image with full image reference via digest
|
||||
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
|
||||
# fetch image with full image reference, specific platform, and signature verification
|
||||
hauler store add image rgcrprod.azurecr.us/hauler/rke2-manifest.yaml:v1.28.12-rke2r1 --platform linux/amd64 --key carbide-key.pub`,
|
||||
curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub
|
||||
hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
@@ -351,13 +361,13 @@ hauler store add chart path/to/chart.tar.gz --repo .
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev
|
||||
|
||||
# fetch remote oci helm chart with version
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.0.6
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2.0
|
||||
|
||||
# fetch remote helm chart
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable
|
||||
|
||||
# fetch remote helm chart with specific version
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.9.1`,
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
@@ -8,19 +8,19 @@ import (
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
func AddFileCmd(ctx context.Context, o *flags.AddFileOpts, s *store.Layout, reference string) error {
|
||||
cfg := v1alpha1.File{
|
||||
cfg := v1.File{
|
||||
Path: reference,
|
||||
}
|
||||
if len(o.Name) > 0 {
|
||||
@@ -29,7 +29,7 @@ func AddFileCmd(ctx context.Context, o *flags.AddFileOpts, s *store.Layout, refe
|
||||
return storeFile(ctx, s, cfg)
|
||||
}
|
||||
|
||||
func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
|
||||
func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
copts := getter.ClientOptions{
|
||||
@@ -42,13 +42,13 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("adding 'file' [%s] to the store as [%s]", fi.Path, ref.Name())
|
||||
l.Infof("adding file [%s] to the store as [%s]", fi.Path, ref.Name())
|
||||
_, err = s.AddOCI(ctx, f, ref.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("successfully added 'file' [%s]", ref.Name())
|
||||
l.Infof("successfully added file [%s]", ref.Name())
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -56,24 +56,32 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
|
||||
func AddImageCmd(ctx context.Context, o *flags.AddImageOpts, s *store.Layout, reference string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
cfg := v1alpha1.Image{
|
||||
cfg := v1.Image{
|
||||
Name: reference,
|
||||
}
|
||||
|
||||
// Check if the user provided a key.
|
||||
if o.Key != "" {
|
||||
// verify signature using the provided key.
|
||||
err := cosign.VerifySignature(ctx, s, o.Key, cfg.Name, rso, ro)
|
||||
err := cosign.VerifySignature(ctx, s, o.Key, o.Tlog, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", cfg.Name)
|
||||
} else if o.CertIdentityRegexp != "" || o.CertIdentity != "" {
|
||||
// verify signature using the provided keyless details
|
||||
l.Infof("verifying keyless signature for [%s]", cfg.Name)
|
||||
err := cosign.VerifyKeylessSignature(ctx, s, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, o.Tlog, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", cfg.Name)
|
||||
}
|
||||
|
||||
return storeImage(ctx, s, cfg, o.Platform, rso, ro)
|
||||
}
|
||||
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
@@ -83,15 +91,15 @@ func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image, platform
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("adding 'image' [%s] to the store", i.Name)
|
||||
l.Infof("adding image [%s] to the store", i.Name)
|
||||
|
||||
r, err := name.ParseReference(i.Name)
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("unable to parse 'image' [%s]: %v... skipping...", i.Name, err)
|
||||
l.Warnf("unable to parse image [%s]: %v... skipping...", i.Name, err)
|
||||
return nil
|
||||
} else {
|
||||
l.Errorf("unable to parse 'image' [%s]: %v", i.Name, err)
|
||||
l.Errorf("unable to parse image [%s]: %v", i.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -99,21 +107,20 @@ func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image, platform
|
||||
err = cosign.SaveImage(ctx, s, r.Name(), platform, rso, ro)
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("unable to add 'image' [%s] to store: %v... skipping...", r.Name(), err)
|
||||
l.Warnf("unable to add image [%s] to store: %v... skipping...", r.Name(), err)
|
||||
return nil
|
||||
} else {
|
||||
l.Errorf("unable to add 'image' [%s] to store: %v", r.Name(), err)
|
||||
l.Errorf("unable to add image [%s] to store: %v", r.Name(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("successfully added 'image' [%s]", r.Name())
|
||||
l.Infof("successfully added image [%s]", r.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, chartName string) error {
|
||||
// TODO: Reduce duplicates between api chart and upstream helm opts
|
||||
cfg := v1alpha1.Chart{
|
||||
cfg := v1.Chart{
|
||||
Name: chartName,
|
||||
RepoURL: o.ChartOpts.RepoURL,
|
||||
Version: o.ChartOpts.Version,
|
||||
@@ -122,10 +129,10 @@ func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, ch
|
||||
return storeChart(ctx, s, cfg, o.ChartOpts)
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *action.ChartPathOptions) error {
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *action.ChartPathOptions) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
l.Infof("adding 'chart' [%s] to the store", cfg.Name)
|
||||
l.Infof("adding chart [%s] to the store", cfg.Name)
|
||||
|
||||
// TODO: This shouldn't be necessary
|
||||
opts.RepoURL = cfg.RepoURL
|
||||
@@ -150,6 +157,6 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("successfully added 'chart' [%s]", ref.Name())
|
||||
l.Infof("successfully added chart [%s]", ref.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -16,6 +16,10 @@ import (
func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef string, ro *flags.CliRootOpts) error {
l := log.FromContext(ctx)

if o.Username != "" || o.Password != "" {
return fmt.Errorf("--username/--password have been deprecated, please use 'hauler login'")
}

components := strings.SplitN(targetRef, "://", 2)
switch components[0] {
case "dir":
@@ -31,13 +35,11 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef
case "registry":
l.Debugf("identified registry target reference of [%s]", components[1])
ropts := content.RegistryOptions{
Username: o.Username,
Password: o.Password,
Insecure: o.Insecure,
PlainHTTP: o.PlainHTTP,
}

err := cosign.LoadImages(ctx, s, components[1], ropts, ro)
err := cosign.LoadImages(ctx, s, components[1], o.Only, ropts, ro)
if err != nil {
return err
}
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/olekukonko/tablewriter"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -47,12 +48,20 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
return err
|
||||
}
|
||||
|
||||
i := newItem(s, desc, internalManifest, fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture), o)
|
||||
i := newItemWithDigest(
|
||||
s,
|
||||
internalDesc.Digest.String(),
|
||||
desc,
|
||||
internalManifest,
|
||||
fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture),
|
||||
o,
|
||||
)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
|
||||
// handle "non" multi-arch images
|
||||
} else if desc.MediaType == consts.DockerManifestSchema2 || desc.MediaType == consts.OCIManifestSchema1 {
|
||||
var m ocispec.Manifest
|
||||
@@ -66,14 +75,15 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// Unmarshal the OCI image content
|
||||
// unmarshal the oci image content
|
||||
var internalManifest ocispec.Image
|
||||
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if internalManifest.Architecture != "" {
|
||||
i := newItem(s, desc, m, fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
|
||||
i := newItem(s, desc, m,
|
||||
fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
@@ -85,7 +95,8 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
// handle the rest
|
||||
|
||||
// handle everything else (charts, files, sigs, etc.)
|
||||
} else {
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
@@ -118,13 +129,13 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
msg = buildJson(items...)
|
||||
fmt.Println(msg)
|
||||
default:
|
||||
buildTable(items...)
|
||||
buildTable(o.ShowDigests, items...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildListRepos(items ...item) {
|
||||
// Create map to track unique repository names
|
||||
// create map to track unique repository names
|
||||
repos := make(map[string]bool)
|
||||
|
||||
for _, i := range items {
|
||||
@@ -141,39 +152,85 @@ func buildListRepos(items ...item) {
|
||||
repos[repoName] = true
|
||||
}
|
||||
|
||||
// Collect and print unique repository names
|
||||
// collect and print unique repository names
|
||||
for repoName := range repos {
|
||||
fmt.Println(repoName)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTable(items ...item) {
|
||||
// Create a table for the results
|
||||
func buildTable(showDigests bool, items ...item) {
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Reference", "Type", "Platform", "# Layers", "Size"})
|
||||
|
||||
if showDigests {
|
||||
table.SetHeader([]string{"Reference", "Type", "Platform", "Digest", "# Layers", "Size"})
|
||||
} else {
|
||||
table.SetHeader([]string{"Reference", "Type", "Platform", "# Layers", "Size"})
|
||||
}
|
||||
|
||||
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
|
||||
table.SetRowLine(false)
|
||||
table.SetAutoMergeCellsByColumnIndex([]int{0})
|
||||
|
||||
totalSize := int64(0)
|
||||
|
||||
for _, i := range items {
|
||||
if i.Type != "" {
|
||||
row := []string{
|
||||
i.Reference,
|
||||
if i.Type == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ref := truncateReference(i.Reference)
|
||||
var row []string
|
||||
|
||||
if showDigests {
|
||||
digest := i.Digest
|
||||
if digest == "" {
|
||||
digest = "-"
|
||||
}
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
digest,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
} else {
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
totalSize += i.Size
|
||||
table.Append(row)
|
||||
}
|
||||
|
||||
totalSize += i.Size
|
||||
table.Append(row)
|
||||
}
|
||||
|
||||
// align total column based on digest visibility
|
||||
if showDigests {
|
||||
table.SetFooter([]string{"", "", "", "", "Total", byteCountSI(totalSize)})
|
||||
} else {
|
||||
table.SetFooter([]string{"", "", "", "Total", byteCountSI(totalSize)})
|
||||
}
|
||||
table.SetFooter([]string{"", "", "", "Total", byteCountSI(totalSize)})
|
||||
|
||||
table.Render()
|
||||
}
|
||||
|
||||
// truncateReference shortens the digest of a reference
|
||||
func truncateReference(ref string) string {
|
||||
const prefix = "@sha256:"
|
||||
idx := strings.Index(ref, prefix)
|
||||
if idx == -1 {
|
||||
return ref
|
||||
}
|
||||
if len(ref) > idx+len(prefix)+12 {
|
||||
return ref[:idx+len(prefix)+12] + "…"
|
||||
}
|
||||
return ref
|
||||
}
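// Sketch, not part of this diff: a package-internal test of the truncation
// above (assumes a _test.go file in this package that imports "testing";
// the reference value is hypothetical).
func TestTruncateReference(t *testing.T) {
	full := "ghcr.io/hauler-dev/hauler@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5"
	want := "ghcr.io/hauler-dev/hauler@sha256:7fa7445dfbeb…"
	if got := truncateReference(full); got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}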
|
||||
|
||||
func buildJson(item ...item) string {
|
||||
data, err := json.MarshalIndent(item, "", " ")
|
||||
if err != nil {
|
||||
@@ -186,6 +243,7 @@ type item struct {
|
||||
Reference string
|
||||
Type string
|
||||
Platform string
|
||||
Digest string
|
||||
Layers int
|
||||
Size int64
|
||||
}
|
||||
@@ -210,6 +268,13 @@ func (a byReferenceAndArch) Less(i, j int) bool {
|
||||
return a[i].Reference < a[j].Reference
|
||||
}
|
||||
|
||||
// overrides the digest with a specific per platform digest
|
||||
func newItemWithDigest(s *store.Layout, digestStr string, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
item := newItem(s, desc, m, plat, o)
|
||||
item.Digest = digestStr
|
||||
return item
|
||||
}
|
||||
|
||||
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
var size int64 = 0
|
||||
for _, l := range m.Layers {
|
||||
@@ -255,6 +320,7 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat
|
||||
Reference: ref.Name(),
|
||||
Type: ctype,
|
||||
Platform: plat,
|
||||
Digest: desc.Digest.String(),
|
||||
Layers: len(m.Layers),
|
||||
Size: size,
|
||||
}
|
||||
|
||||
@@ -2,45 +2,29 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// LoadCmd
|
||||
// TODO: Just use mholt/archiver for now, even though we don't need most of it
|
||||
func LoadCmd(ctx context.Context, o *flags.LoadOpts, archiveRefs ...string) error {
|
||||
// extracts the contents of an archived oci layout to an existing oci layout
|
||||
func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
storeDir := o.StoreDir
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = os.Getenv(consts.HaulerStoreDir)
|
||||
}
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = consts.DefaultStoreName
|
||||
}
|
||||
|
||||
for _, archiveRef := range archiveRefs {
|
||||
l.Infof("loading content from [%s] to [%s]", archiveRef, storeDir)
|
||||
err := unarchiveLayoutTo(ctx, archiveRef, storeDir, o.TempOverride)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// unarchiveLayoutTo accepts an archived oci layout and extracts the contents to an existing oci layout, preserving the index
|
||||
func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string, tempOverride string) error {
|
||||
l := log.FromContext(ctx)
|
||||
tempOverride := o.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
@@ -54,7 +38,89 @@ func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string, tem
|
||||
|
||||
l.Debugf("using temporary directory at [%s]", tempDir)
|
||||
|
||||
if err := archives.Unarchive(ctx, archivePath, tempDir); err != nil {
|
||||
for _, fileName := range o.FileName {
|
||||
l.Infof("loading haul [%s] to [%s]", fileName, o.StoreDir)
|
||||
err := unarchiveLayoutTo(ctx, fileName, o.StoreDir, tempDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// accepts an archived OCI layout, extracts the contents to an existing OCI layout, and preserves the index
|
||||
func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDir string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
|
||||
l.Debugf("detected remote archive... starting download... [%s]", haulPath)
|
||||
|
||||
h := getter.NewHttp()
|
||||
parsedURL, err := url.Parse(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := h.Open(ctx, parsedURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
fileName := h.Name(parsedURL)
|
||||
if fileName == "" {
|
||||
fileName = filepath.Base(parsedURL.Path)
|
||||
}
|
||||
haulPath = filepath.Join(tempDir, fileName)
|
||||
|
||||
out, err := os.Create(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, rc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := archives.Unarchive(ctx, haulPath, tempDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure the incoming index.json has the correct annotations.
|
||||
data, err := os.ReadFile(tempDir + "/index.json")
|
||||
if err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(data, &idx); err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
for i := range idx.Manifests {
|
||||
if idx.Manifests[i].Annotations == nil {
|
||||
idx.Manifests[i].Annotations = make(map[string]string)
|
||||
}
|
||||
if _, exists := idx.Manifests[i].Annotations[consts.KindAnnotationName]; !exists {
|
||||
idx.Manifests[i].Annotations[consts.KindAnnotationName] = consts.KindAnnotationImage
|
||||
}
|
||||
if ref, ok := idx.Manifests[i].Annotations[consts.ContainerdImageNameKey]; ok {
|
||||
if slash := strings.Index(ref, "/"); slash != -1 {
|
||||
ref = ref[slash+1:]
|
||||
}
|
||||
if idx.Manifests[i].Annotations[consts.ImageRefKey] != ref {
|
||||
idx.Manifests[i].Annotations[consts.ImageRefKey] = ref
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(idx, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.WriteFile(tempDir+"/index.json", out, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -23,22 +23,11 @@ import (
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
// SaveCmd
|
||||
// TODO: Just use mholt/archiver for now, even though we don't need most of it
|
||||
func SaveCmd(ctx context.Context, o *flags.SaveOpts, outputFile string) error {
|
||||
// saves a content store to store archives
|
||||
func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
storeDir := o.StoreDir
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = os.Getenv(consts.HaulerStoreDir)
|
||||
}
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = consts.DefaultStoreName
|
||||
}
|
||||
|
||||
// Maps to handle compression and archival types
|
||||
// maps to handle compression and archival types
|
||||
compressionMap := archives.CompressionMap
|
||||
archivalMap := archives.ArchivalMap
|
||||
|
||||
@@ -47,7 +36,7 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, outputFile string) error {
|
||||
compression := compressionMap["zst"]
|
||||
archival := archivalMap["tar"]
|
||||
|
||||
absOutputfile, err := filepath.Abs(outputFile)
|
||||
absOutputfile, err := filepath.Abs(o.FileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -57,7 +46,7 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, outputFile string) error {
|
||||
return err
|
||||
}
|
||||
defer os.Chdir(cwd)
|
||||
if err := os.Chdir(storeDir); err != nil {
|
||||
if err := os.Chdir(o.StoreDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -72,7 +61,7 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, outputFile string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("saved store [%s] -> [%s]", storeDir, absOutputfile)
|
||||
l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -111,7 +100,7 @@ func writeExportsManifest(ctx context.Context, dir string, platformStr string) e
|
||||
}
|
||||
|
||||
for _, desc := range imx.Manifests {
|
||||
l.Debugf("descriptor [%s] >>> %s", desc.Digest.String(), desc.MediaType)
|
||||
l.Debugf("descriptor [%s] = [%s]", desc.Digest.String(), desc.MediaType)
|
||||
if artifactType := types.MediaType(desc.ArtifactType); artifactType != "" && !artifactType.IsImage() && !artifactType.IsIndex() {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING ARTIFACT [%q]", desc.Digest.String(), desc.ArtifactType)
|
||||
continue
|
||||
@@ -127,11 +116,11 @@ func writeExportsManifest(ctx context.Context, dir string, platformStr string) e
|
||||
return err
|
||||
}
|
||||
case consts.KindAnnotationIndex:
|
||||
l.Debugf("index [%s]: digest=%s, type=%s, size=%d", refName, desc.Digest.String(), desc.MediaType, desc.Size)
|
||||
l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size)
|
||||
|
||||
// when no platform is provided, warn the user of potential mismatch on import
|
||||
if platform.String() == "" {
|
||||
l.Warnf("index [%s]: provide an export platform to prevent potential platform mismatch on import", refName)
|
||||
l.Warnf("specify an export platform to prevent potential platform mismatch on import of index [%s]", refName)
|
||||
}
|
||||
|
||||
iix, err := idx.ImageIndex(desc.Digest)
|
||||
@@ -147,14 +136,14 @@ func writeExportsManifest(ctx context.Context, dir string, platformStr string) e
|
||||
// check if platform is provided, if so, skip anything that doesn't match
|
||||
if platform.String() != "" {
|
||||
if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS {
|
||||
l.Warnf("index [%s]: digest=%s, platform=%s/%s: does not match the supplied platform, skipping", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// skip 'unknown' platforms... docker hates
|
||||
if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" {
|
||||
l.Warnf("index [%s]: digest=%s, platform=%s/%s: skipping 'unknown/unknown' platform", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,9 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
@@ -13,12 +15,15 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
convert "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/convert"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
tchart "hauler.dev/go/hauler/pkg/collection/chart"
|
||||
"hauler.dev/go/hauler/pkg/collection/imagetxt"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
@@ -27,10 +32,24 @@ import (
|
||||
func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// if passed products, check for a remote manifest to retrieve and use.
|
||||
for _, product := range o.Products {
|
||||
l.Infof("processing content file for product [%s]", product)
|
||||
parts := strings.Split(product, "=")
|
||||
tempOverride := o.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
l.Debugf("using temporary directory at [%s]", tempDir)
|
||||
|
||||
// if passed products, check for a remote manifest to retrieve and use
|
||||
for _, productName := range o.Products {
|
||||
l.Infof("processing product manifest for [%s] to store [%s]", productName, o.StoreDir)
|
||||
parts := strings.Split(productName, "=")
|
||||
tag := strings.ReplaceAll(parts[1], "+", "-")
|
||||
|
||||
ProductRegistry := o.ProductRegistry // cli flag
|
||||
@@ -40,8 +59,8 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
}
|
||||
|
||||
manifestLoc := fmt.Sprintf("%s/hauler/%s-manifest.yaml:%s", ProductRegistry, parts[0], tag)
|
||||
l.Infof("retrieving product manifest from [%s]", manifestLoc)
|
||||
img := v1alpha1.Image{
|
||||
l.Infof("fetching product manifest from [%s]", manifestLoc)
|
||||
img := v1.Image{
|
||||
Name: manifestLoc,
|
||||
}
|
||||
err := storeImage(ctx, s, img, o.Platform, rso, ro)
|
||||
@@ -52,9 +71,9 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
fileName := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
|
||||
fi, err := os.Open(filename)
|
||||
fi, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -62,19 +81,57 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
// if passed a local manifest, process it
|
||||
for _, filename := range o.ContentFiles {
|
||||
l.Debugf("processing content file: [%s]", filename)
|
||||
fi, err := os.Open(filename)
|
||||
// If passed a local manifest, process it
|
||||
for _, fileName := range o.FileName {
|
||||
l.Infof("processing manifest [%s] to store [%s]", fileName, o.StoreDir)
|
||||
|
||||
haulPath := fileName
|
||||
if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
|
||||
l.Debugf("detected remote manifest... starting download... [%s]", haulPath)
|
||||
|
||||
h := getter.NewHttp()
|
||||
parsedURL, err := url.Parse(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := h.Open(ctx, parsedURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
fileName := h.Name(parsedURL)
|
||||
if fileName == "" {
|
||||
fileName = filepath.Base(parsedURL.Path)
|
||||
}
|
||||
haulPath = filepath.Join(tempDir, fileName)
|
||||
|
||||
out, err := os.Create(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, rc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fi, err := os.Open(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fi.Close()
|
||||
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -94,164 +151,470 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
docs = append(docs, raw)
|
||||
}
|
||||
|
||||
for _, doc := range docs {
|
||||
obj, err := content.Load(doc)
|
||||
if err != nil {
|
||||
l.Debugf("skipping sync of unknown content")
|
||||
l.Warnf("skipping syncing due to %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Infof("syncing [%s] to store", obj.GroupVersionKind().String())
|
||||
gvk := obj.GroupVersionKind()
|
||||
l.Infof("syncing content [%s] with [kind=%s] to store [%s]", gvk.GroupVersion(), gvk.Kind, o.StoreDir)
|
||||
|
||||
switch gvk.Kind {
|
||||
|
||||
// TODO: Should type switch instead...
|
||||
switch obj.GroupVersionKind().Kind {
|
||||
case consts.FilesContentKind:
|
||||
var cfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
for _, f := range cfg.Spec.Files {
|
||||
err := storeFile(ctx, s, f)
|
||||
if err != nil {
|
||||
var alphaCfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Files
|
||||
if err := convert.ConvertFiles(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range v1Cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ImagesContentKind:
|
||||
var cfg v1alpha1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
a := cfg.GetAnnotations()
|
||||
for _, i := range cfg.Spec.Images {
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
// Check if the user provided a registry. If a registry is provided in the annotation, use it for the images that don't have a registry in their ref name.
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
|
||||
newReg := o.Registry // cli flag
|
||||
// if no cli flag but there was an annotation, use the annotation.
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
// Check if the user provided a key. The flag from the CLI takes precedence over the annotation. The individual image key takes precedence over both.
|
||||
if a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" {
|
||||
key := o.Key // cli flag
|
||||
// if no cli flag but there was an annotation, use the annotation.
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// the individual image key trumps all
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
// verify signature using the provided key.
|
||||
err := cosign.VerifySignature(ctx, s, key, i.Name, rso, ro)
|
||||
if err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]. ** hauler will skip adding this image to the store **:\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
}
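// Sketch, not part of this diff: the precedence described in the comments
// above (per-image value over CLI flag over annotation), written as a small
// helper; the real code additionally expands "~" in key paths via homedir.
//
//	resolve := func(imageVal, cliVal, annotationVal string) string {
//		if imageVal != "" {
//			return imageVal
//		}
//		if cliVal != "" {
//			return cliVal
//		}
//		return annotationVal
//	}
//	key := resolve(i.Key, o.Key, a[consts.ImageAnnotationKey])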
|
||||
|
||||
// Check if the user provided a platform. The flag from the CLI takes precedence over the annotation. The individual image platform takes precedence over both.
|
||||
platform := o.Platform // cli flag
|
||||
// if no cli flag but there was an annotation, use the annotation.
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
// the individual image platform trumps all
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
err = storeImage(ctx, s, i, platform, rso, ro)
|
||||
if err != nil {
|
||||
var alphaCfg v1alpha1.Images
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Images
|
||||
if err := convert.ConvertImages(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := v1Cfg.GetAnnotations()
|
||||
for _, i := range v1Cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("keyless signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := cfg.GetAnnotations()
|
||||
for _, i := range cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("keyless signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
// sync with local index
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
case consts.ChartsContentKind:
|
||||
var cfg v1alpha1.Charts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
for _, ch := range cfg.Spec.Charts {
|
||||
// TODO: Provide a way to configure syncs
|
||||
err := storeChart(ctx, s, ch, &action.ChartPathOptions{})
|
||||
if err != nil {
|
||||
var alphaCfg v1alpha1.Charts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Charts
|
||||
if err := convert.ConvertCharts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ch := range v1Cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Charts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ch := range cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ChartsCollectionKind:
|
||||
var cfg v1alpha1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
for _, cfg := range cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(cfg, &action.ChartPathOptions{
|
||||
RepoURL: cfg.RepoURL,
|
||||
Version: cfg.Version,
|
||||
})
|
||||
if err != nil {
|
||||
var alphaCfg v1alpha1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
var v1Cfg v1.ThickCharts
|
||||
if err := convert.ConvertThickCharts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, chObj := range v1Cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
|
||||
RepoURL: chObj.RepoURL,
|
||||
Version: chObj.Version,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, chObj := range cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
|
||||
RepoURL: chObj.RepoURL,
|
||||
Version: chObj.Version,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ImageTxtsContentKind:
|
||||
var cfg v1alpha1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
for _, cfgIt := range cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
|
||||
var alphaCfg v1alpha1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.ImageTxts
|
||||
if err := convert.ConvertImageTxts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cfgIt := range v1Cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", v1Cfg.Name, err)
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", v1Cfg.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
|
||||
case "v1":
|
||||
var cfg v1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cfgIt := range cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unrecognized content or collection type: %s", obj.GroupVersionKind().String())
|
||||
return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts, ThickCharts, ImageTxts]", gvk.Kind)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
401
go.mod
401
go.mod
@@ -1,49 +1,52 @@
|
||||
module hauler.dev/go/hauler
|
||||
|
||||
go 1.23
|
||||
go 1.25.3
|
||||
|
||||
toolchain go1.23.4
|
||||
replace github.com/sigstore/cosign/v3 => github.com/hauler-dev/cosign/v3 v3.0.3-0.20251212172429-4984f8e197f6
|
||||
|
||||
replace github.com/sigstore/cosign/v2 => github.com/hauler-dev/cosign/v2 v2.4.2-0.20250118012335-ee9b762a922a
|
||||
replace github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
|
||||
replace github.com/olekukonko/tablewriter => github.com/olekukonko/tablewriter v0.0.5
|
||||
|
||||
replace github.com/docker/cli => github.com/docker/cli v28.5.1+incompatible
|
||||
|
||||
require (
|
||||
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be
|
||||
github.com/containerd/containerd v1.7.23
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
github.com/google/go-containerregistry v0.20.2
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/containerd/containerd v1.7.29
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
github.com/google/go-containerregistry v0.20.7
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/mholt/archives v0.1.0
|
||||
github.com/mholt/archives v0.1.5
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/olekukonko/tablewriter v1.1.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/zerolog v1.31.0
|
||||
github.com/sigstore/cosign/v2 v2.4.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.11.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
golang.org/x/sync v0.10.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/sigstore/cosign/v3 v3.0.2
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
|
||||
github.com/spf13/afero v1.15.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
golang.org/x/sync v0.18.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.16.3
|
||||
k8s.io/apimachinery v0.31.3
|
||||
k8s.io/client-go v0.31.3
|
||||
oras.land/oras-go v1.2.5
|
||||
helm.sh/helm/v3 v3.19.0
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/client-go v0.34.1
|
||||
oras.land/oras-go v1.2.7
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.9.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.5.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 // indirect
|
||||
cuelang.org/go v0.9.2 // indirect
|
||||
cloud.google.com/go/auth v0.17.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d // indirect
|
||||
cuelang.org/go v0.14.1 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
@@ -52,19 +55,17 @@ require (
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.3.2 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
|
||||
github.com/STARRY-S/zip v0.2.1 // indirect
|
||||
github.com/STARRY-S/zip v0.2.3 // indirect
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
|
||||
github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
|
||||
github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
|
||||
github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
|
||||
@@ -76,265 +77,283 @@ require (
|
||||
github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
|
||||
github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
|
||||
github.com/aliyun/credentials-go v1.3.2 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
|
||||
github.com/aws/smithy-go v1.20.4 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.20 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.1 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.81.0 // indirect
|
||||
github.com/buildkite/go-pipeline v0.13.1 // indirect
|
||||
github.com/buildkite/interpolate v0.1.3 // indirect
|
||||
github.com/buildkite/roko v1.2.0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.107.2 // indirect
|
||||
github.com/buildkite/go-pipeline v0.16.0 // indirect
|
||||
github.com/buildkite/interpolate v0.1.5 // indirect
|
||||
github.com/buildkite/roko v1.4.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.4 // indirect
|
||||
github.com/containerd/platforms v1.0.0-rc.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.17.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v27.1.1+incompatible // indirect
|
||||
github.com/docker/cli v29.0.3+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v25.0.6+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.0 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/docker v28.5.2+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/proto v1.12.1 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/emicklei/proto v1.14.2 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.16.0 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-chi/chi v4.1.2+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-chi/chi/v5 v5.2.3 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.23.0 // indirect
|
||||
github.com/go-openapi/errors v0.22.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/loads v0.22.0 // indirect
|
||||
github.com/go-openapi/runtime v0.28.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/strfmt v0.23.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-openapi/validate v0.24.0 // indirect
|
||||
github.com/go-piv/piv-go v1.11.0 // indirect
|
||||
github.com/go-openapi/analysis v0.24.1 // indirect
|
||||
github.com/go-openapi/errors v0.22.4 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.22.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.3 // indirect
|
||||
github.com/go-openapi/loads v0.23.2 // indirect
|
||||
github.com/go-openapi/runtime v0.29.2 // indirect
|
||||
github.com/go-openapi/spec v0.22.1 // indirect
|
||||
github.com/go-openapi/strfmt v0.25.0 // indirect
|
||||
github.com/go-openapi/swag v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/conv v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/loading v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/mangling v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/netutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||
github.com/go-openapi/validate v0.25.1 // indirect
|
||||
github.com/go-piv/piv-go/v2 v2.4.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.8.2 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/certificate-transparency-go v1.2.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-github/v55 v55.0.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/certificate-transparency-go v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-github/v73 v73.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/in-toto/attestation v1.1.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.2 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.9.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/compress v1.18.1 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
|
||||
github.com/lestrrat-go/dsig v1.0.0 // indirect
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
|
||||
github.com/lestrrat-go/httpcc v1.0.1 // indirect
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect
|
||||
github.com/lestrrat-go/option v1.0.1 // indirect
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.20251110.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.17 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||
github.com/minio/minlz v1.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.2.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oleiade/reflections v1.1.0 // indirect
|
||||
github.com/open-policy-agent/opa v0.68.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/open-policy-agent/opa v1.9.0 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/prometheus/client_golang v1.20.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.0 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/sassoftware/relic v7.2.1+incompatible // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
|
||||
github.com/segmentio/asm v1.2.0 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sigstore/fulcio v1.6.3 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.3.2 // indirect
|
||||
github.com/sigstore/rekor v1.3.6 // indirect
|
||||
github.com/sigstore/sigstore v1.8.9 // indirect
|
||||
github.com/sigstore/sigstore-go v0.6.1 // indirect
|
||||
github.com/sigstore/timestamp-authority v1.2.2 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.5 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.19.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect
|
||||
github.com/sigstore/fulcio v1.8.3 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.5.0 // indirect
|
||||
github.com/sigstore/rekor v1.4.3 // indirect
|
||||
github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
|
||||
github.com/sigstore/sigstore v1.10.0 // indirect
|
||||
github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
|
||||
github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/spf13/viper v1.21.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
|
||||
github.com/thales-e-security/pool v0.0.2 // indirect
|
||||
github.com/therootcompany/xz v1.0.1 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.7.0 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.0.1 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tjfoc/gmsm v1.4.1 // indirect
|
||||
github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
|
||||
github.com/transparency-dev/merkle v0.0.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||
github.com/vbatts/tar-split v0.11.5 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/valyala/fastjson v1.6.4 // indirect
|
||||
github.com/vbatts/tar-split v0.12.2 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.5.30 // indirect
|
||||
github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xanzy/go-gitlab v0.109.0 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.29.0 // indirect
|
||||
go.step.sm/crypto v0.51.2 // indirect
|
||||
gitlab.com/gitlab-org/api/client-go v0.148.1 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.6 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.uber.org/zap v1.27.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
|
||||
golang.org/x/mod v0.20.0 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
google.golang.org/api v0.196.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
google.golang.org/grpc v1.66.0 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.33.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
google.golang.org/api v0.256.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
|
||||
google.golang.org/grpc v1.77.0 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.31.3 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.3 // indirect
|
||||
k8s.io/apiserver v0.31.3 // indirect
|
||||
k8s.io/cli-runtime v0.31.3 // indirect
|
||||
k8s.io/component-base v0.31.3 // indirect
|
||||
k8s.io/api v0.34.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.0 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2 // indirect
|
||||
k8s.io/kubectl v0.31.3 // indirect
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/release-utils v0.8.4 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
|
||||
k8s.io/kubectl v0.34.0 // indirect
|
||||
k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/release-utils v0.12.2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
@@ -7,14 +7,26 @@ import (
|
||||
|
||||
type AddImageOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
Key string
|
||||
Platform string
|
||||
Name string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Tlog bool
|
||||
Platform string
|
||||
}
|
||||
|
||||
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specifiy the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
}
|
||||
|
||||
|
||||
@@ -9,13 +9,28 @@ type CopyOpts struct {
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
Only string
|
||||
}
|
||||
|
||||
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Optional) Username to use for authentication")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Optional) Password to use for authentication")
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "(Optional) Allow insecure connections")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "(Optional) Allow plain HTTP connections")
|
||||
f.StringVarP(&o.Only, "only", "o", "", "(Optional) Custom string array to only copy specific 'image' items")
|
||||
|
||||
if err := f.MarkDeprecated("username", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkDeprecated("password", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("username"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("password"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ type InfoOpts struct {
|
||||
TypeFilter string
|
||||
SizeUnit string
|
||||
ListRepos bool
|
||||
ShowDigests bool
|
||||
}
|
||||
|
||||
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
@@ -17,4 +18,5 @@ func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
f.StringVarP(&o.OutputFormat, "output", "o", "table", "(Optional) Specify the output format (table | json)")
|
||||
f.StringVarP(&o.TypeFilter, "type", "t", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom)")
|
||||
f.BoolVar(&o.ListRepos, "list-repos", false, "(Optional) List all repository names")
|
||||
f.BoolVar(&o.ShowDigests, "digests", false, "(Optional) Show digests of each artifact in the output table")
|
||||
}
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type LoadOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
TempOverride string
|
||||
}
|
||||
|
||||
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
// On Unix systems, the default is $TMPDIR if non-empty, else /tmp.
|
||||
// On Windows, the default is GetTempPath, returning the first non-empty
|
||||
// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
|
||||
// On Plan 9, the default is /tmp.
|
||||
// On Unix systems, the default is $TMPDIR if non-empty, else /tmp
|
||||
// On Windows, the default is GetTempPath, returning the first value from %TMP%, %TEMP%, %USERPROFILE%, or Windows directory
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerArchiveName}, "(Optional) Specify the name of inputted haul(s)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directiory determined by the OS")
|
||||
}
|
||||
|
||||
@@ -14,6 +14,6 @@ type SaveOpts struct {
|
||||
func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulArchiveName, "(Optional) Specify the name of outputted archive")
|
||||
f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)")
|
||||
}
|
||||
|
||||
@@ -41,6 +41,8 @@ func (o *StoreRootOpts) Store(ctx context.Context) (*store.Layout, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.StoreDir = abs
|
||||
|
||||
l.Debugf("using store at [%s]", abs)
|
||||
|
||||
if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
|
||||
|
||||
@@ -1,24 +1,41 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type SyncOpts struct {
|
||||
*StoreRootOpts
|
||||
ContentFiles []string
|
||||
Key string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
FileName []string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
TempOverride string
|
||||
Tlog bool
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.ContentFiles, "files", "f", []string{}, "Location of content manifests (files)... i.e. --files ./rke2-files.yaml")
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerManifestName}, "Specify the name of manifest(s) to sync")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Specify the product name to fetch collections from the product registry i.e. rancher=v2.8.5,rke2=v1.28.11+rke2r1")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Specify the product name to fetch collections from the product registry i.e. rancher=v2.10.1,rke2=v1.31.5+rke2r1")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e linux/amd64 (defaults to all)")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one")
|
||||
f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specify the product registry. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directiory determined by the OS")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
}
|
||||
|
||||
121
pkg/apis/hauler.cattle.io/convert/convert.go
Normal file
121
pkg/apis/hauler.cattle.io/convert/convert.go
Normal file
@@ -0,0 +1,121 @@
package v1alpha1

import (
    "fmt"

    v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
    v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)

// converts v1alpha1.Files -> v1.Files
func ConvertFiles(in *v1alpha1.Files, out *v1.Files) error {
    out.TypeMeta = in.TypeMeta
    out.ObjectMeta = in.ObjectMeta
    out.Spec.Files = make([]v1.File, len(in.Spec.Files))
    for i := range in.Spec.Files {
        out.Spec.Files[i].Name = in.Spec.Files[i].Name
        out.Spec.Files[i].Path = in.Spec.Files[i].Path
    }
    return nil
}

// converts v1alpha1.Images -> v1.Images
func ConvertImages(in *v1alpha1.Images, out *v1.Images) error {
    out.TypeMeta = in.TypeMeta
    out.ObjectMeta = in.ObjectMeta
    out.Spec.Images = make([]v1.Image, len(in.Spec.Images))
    for i := range in.Spec.Images {
        out.Spec.Images[i].Name = in.Spec.Images[i].Name
        out.Spec.Images[i].Platform = in.Spec.Images[i].Platform
        out.Spec.Images[i].Key = in.Spec.Images[i].Key
    }
    return nil
}

// converts v1alpha1.Charts -> v1.Charts
func ConvertCharts(in *v1alpha1.Charts, out *v1.Charts) error {
    out.TypeMeta = in.TypeMeta
    out.ObjectMeta = in.ObjectMeta
    out.Spec.Charts = make([]v1.Chart, len(in.Spec.Charts))
    for i := range in.Spec.Charts {
        out.Spec.Charts[i].Name = in.Spec.Charts[i].Name
        out.Spec.Charts[i].RepoURL = in.Spec.Charts[i].RepoURL
        out.Spec.Charts[i].Version = in.Spec.Charts[i].Version
    }
    return nil
}

// converts v1alpha1.ThickCharts -> v1.ThickCharts
func ConvertThickCharts(in *v1alpha1.ThickCharts, out *v1.ThickCharts) error {
    out.TypeMeta = in.TypeMeta
    out.ObjectMeta = in.ObjectMeta
    out.Spec.Charts = make([]v1.ThickChart, len(in.Spec.Charts))
    for i := range in.Spec.Charts {
        out.Spec.Charts[i].Chart.Name = in.Spec.Charts[i].Chart.Name
        out.Spec.Charts[i].Chart.RepoURL = in.Spec.Charts[i].Chart.RepoURL
        out.Spec.Charts[i].Chart.Version = in.Spec.Charts[i].Chart.Version
    }
    return nil
}

// converts v1alpha1.ImageTxts -> v1.ImageTxts
func ConvertImageTxts(in *v1alpha1.ImageTxts, out *v1.ImageTxts) error {
    out.TypeMeta = in.TypeMeta
    out.ObjectMeta = in.ObjectMeta
    out.Spec.ImageTxts = make([]v1.ImageTxt, len(in.Spec.ImageTxts))
    for i := range in.Spec.ImageTxts {
        out.Spec.ImageTxts[i].Ref = in.Spec.ImageTxts[i].Ref
        out.Spec.ImageTxts[i].Sources.Include = append(
            out.Spec.ImageTxts[i].Sources.Include,
            in.Spec.ImageTxts[i].Sources.Include...,
        )
        out.Spec.ImageTxts[i].Sources.Exclude = append(
            out.Spec.ImageTxts[i].Sources.Exclude,
            in.Spec.ImageTxts[i].Sources.Exclude...,
        )
    }
    return nil
}

// convert v1alpha1 object to v1 object
func ConvertObject(in interface{}) (interface{}, error) {
    switch src := in.(type) {

    case *v1alpha1.Files:
        dst := &v1.Files{}
        if err := ConvertFiles(src, dst); err != nil {
            return nil, err
        }
        return dst, nil

    case *v1alpha1.Images:
        dst := &v1.Images{}
        if err := ConvertImages(src, dst); err != nil {
            return nil, err
        }
        return dst, nil

    case *v1alpha1.Charts:
        dst := &v1.Charts{}
        if err := ConvertCharts(src, dst); err != nil {
            return nil, err
        }
        return dst, nil

    case *v1alpha1.ThickCharts:
        dst := &v1.ThickCharts{}
        if err := ConvertThickCharts(src, dst); err != nil {
            return nil, err
        }
        return dst, nil

    case *v1alpha1.ImageTxts:
        dst := &v1.ImageTxts{}
        if err := ConvertImageTxts(src, dst); err != nil {
            return nil, err
        }
        return dst, nil
    }

    return nil, fmt.Errorf("unsupported object type [%T]", in)
}
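A minimal sketch (not part of the diff) of how ConvertObject might be called once a v1alpha1 document has been decoded; the upgradeImages helper and its placement are assumptions, only ConvertObject and the v1/v1alpha1 types come from the file above.

// upgradeImages is a hypothetical helper showing one way to upgrade a decoded
// v1alpha1 Images object to its v1 representation via ConvertObject.
func upgradeImages(in *v1alpha1.Images) (*v1.Images, error) {
    converted, err := ConvertObject(in)
    if err != nil {
        return nil, err // unsupported kinds surface the fmt.Errorf above
    }
    out, ok := converted.(*v1.Images)
    if !ok {
        return nil, fmt.Errorf("unexpected converted type [%T]", converted)
    }
    return out, nil
}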
42
pkg/apis/hauler.cattle.io/v1/chart.go
Normal file
@@ -0,0 +1,42 @@
package v1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type Charts struct {
    *metav1.TypeMeta  `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec ChartSpec `json:"spec,omitempty"`
}

type ChartSpec struct {
    Charts []Chart `json:"charts,omitempty"`
}

type Chart struct {
    Name    string `json:"name,omitempty"`
    RepoURL string `json:"repoURL,omitempty"`
    Version string `json:"version,omitempty"`
}

type ThickCharts struct {
    *metav1.TypeMeta  `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec ThickChartSpec `json:"spec,omitempty"`
}

type ThickChartSpec struct {
    Charts []ThickChart `json:"charts,omitempty"`
}

type ThickChart struct {
    Chart       `json:",inline,omitempty"`
    ExtraImages []ChartImage `json:"extraImages,omitempty"`
}

type ChartImage struct {
    Reference string `json:"ref"`
}
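For context, a hedged sketch (not from the diff) of how these types compose: a ThickChart embeds Chart, so the chart coordinates and any extra images travel in a single spec entry. The chart name, repo, and image reference below are illustrative values only.

// Illustrative values only; see testdata/hauler-manifest.yaml for real examples.
var exampleThickCharts = ThickCharts{
    Spec: ThickChartSpec{
        Charts: []ThickChart{{
            Chart:       Chart{Name: "rancher", RepoURL: "https://releases.rancher.com/server-charts/stable", Version: "2.8.5"},
            ExtraImages: []ChartImage{{Reference: "busybox:stable"}},
        }},
    },
}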
17
pkg/apis/hauler.cattle.io/v1/driver.go
Normal file
@@ -0,0 +1,17 @@
package v1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type Driver struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec DriverSpec `json:"spec"`
}

type DriverSpec struct {
    Type    string `json:"type"`
    Version string `json:"version"`
}
25
pkg/apis/hauler.cattle.io/v1/file.go
Normal file
@@ -0,0 +1,25 @@
package v1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type Files struct {
    *metav1.TypeMeta  `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec FileSpec `json:"spec,omitempty"`
}

type FileSpec struct {
    Files []File `json:"files,omitempty"`
}

type File struct {
    // Path is the path to the file contents, can be a local or remote path
    Path string `json:"path"`

    // Name is an optional field specifying the name of the file when specified,
    // it will override any dynamic name discovery from Path
    Name string `json:"name,omitempty"`
}
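A small sketch (not part of the diff) of the Name override described in the comments above: without Name, the file keeps the name discovered from Path; with Name set, the stored file is renamed. The paths mirror the testdata manifests further down.

var exampleFiles = Files{
    Spec: FileSpec{
        Files: []File{
            {Path: "https://get.rke2.io/install.sh"},                          // stored as install.sh
            {Path: "https://get.rke2.io/install.sh", Name: "rke2-install.sh"}, // Name overrides the discovered name
        },
    },
}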
12
pkg/apis/hauler.cattle.io/v1/groupversion_info.go
Normal file
@@ -0,0 +1,12 @@
package v1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"

    "hauler.dev/go/hauler/pkg/consts"
)

var (
    ContentGroupVersion    = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1"}
    CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1"}
)
40
pkg/apis/hauler.cattle.io/v1/image.go
Normal file
@@ -0,0 +1,40 @@
package v1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type Images struct {
    *metav1.TypeMeta  `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec ImageSpec `json:"spec,omitempty"`
}

type ImageSpec struct {
    Images []Image `json:"images,omitempty"`
}

type Image struct {
    // Name is the full location for the image, can be referenced by tags or digests
    Name string `json:"name"`

    // Path is the path to the cosign public key used for verifying image signatures
    //Key string `json:"key,omitempty"`
    Key string `json:"key"`

    // Path is the path to the cosign public key used for verifying image signatures
    //Tlog string `json:"use-tlog-verify,omitempty"`
    Tlog bool `json:"use-tlog-verify"`

    // cosign keyless validation options
    CertIdentity                 string `json:"certificate-identity"`
    CertIdentityRegexp           string `json:"certificate-identity-regexp"`
    CertOidcIssuer               string `json:"certificate-oidc-issuer"`
    CertOidcIssuerRegexp         string `json:"certificate-oidc-issuer-regexp"`
    CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`

    // Platform of the image to be pulled. If not specified, all platforms will be pulled.
    //Platform string `json:"key,omitempty"`
    Platform string `json:"platform"`
}
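A hedged sketch (not from the diff) of how the new verification fields might be populated: one entry pinned to a platform, one verified against a local cosign public key with transparency-log checks enabled, and one relying on the keyless certificate fields. All image names, key paths, and identities below are made up.

var exampleImages = Images{
    Spec: ImageSpec{
        Images: []Image{
            {Name: "busybox:stable", Platform: "linux/amd64"},
            {Name: "ghcr.io/example/app:1.0.0", Key: "cosign.pub", Tlog: true},
            {
                Name:           "ghcr.io/example/app:1.0.0",
                CertIdentity:   "https://github.com/example/app/.github/workflows/release.yaml@refs/tags/v1.0.0",
                CertOidcIssuer: "https://token.actions.githubusercontent.com",
            },
        },
    },
}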
26
pkg/apis/hauler.cattle.io/v1/imagetxt.go
Normal file
@@ -0,0 +1,26 @@
package v1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type ImageTxts struct {
    *metav1.TypeMeta  `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec ImageTxtsSpec `json:"spec,omitempty"`
}

type ImageTxtsSpec struct {
    ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
}

type ImageTxt struct {
    Ref     string          `json:"ref,omitempty"`
    Sources ImageTxtSources `json:"sources,omitempty"`
}

type ImageTxtSources struct {
    Include []string `json:"include,omitempty"`
    Exclude []string `json:"exclude,omitempty"`
}
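Another hedged sketch (not from the diff): an ImageTxt points at an images.txt-style list and narrows it with include/exclude sources. The URL and registry filters below are illustrative, and the exact matching semantics live in the imagetxt collection handler rather than in these type definitions.

var exampleImageTxts = ImageTxts{
    Spec: ImageTxtsSpec{
        ImageTxts: []ImageTxt{{
            Ref:     "https://example.com/images.txt",
            Sources: ImageTxtSources{Include: []string{"docker.io"}, Exclude: []string{"quay.io"}},
        }},
    },
}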
@@ -7,6 +7,6 @@ import (
)

var (
    ContentGroupVersion    = schema.GroupVersion{Group: consts.ContentGroup, Version: consts.APIVersion}
    CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: consts.APIVersion}
    ContentGroupVersion    = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1alpha1"}
    CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1alpha1"}
)
@@ -23,6 +23,17 @@ type Image struct {
    //Key string `json:"key,omitempty"`
    Key string `json:"key"`

    // Path is the path to the cosign public key used for verifying image signatures
    //Tlog string `json:"use-tlog-verify,omitempty"`
    Tlog bool `json:"use-tlog-verify"`

    // cosign keyless validation options
    CertIdentity                 string `json:"certificate-identity"`
    CertIdentityRegexp           string `json:"certificate-identity-regexp"`
    CertOidcIssuer               string `json:"certificate-oidc-issuer"`
    CertOidcIssuerRegexp         string `json:"certificate-oidc-issuer-regexp"`
    CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`

    // Platform of the image to be pulled. If not specified, all platforms will be pulled.
    //Platform string `json:"key,omitempty"`
    Platform string `json:"platform"`

@@ -10,7 +10,7 @@ import (
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
// Maps to handle compression and archival types
|
||||
// maps to handle compression types
|
||||
var CompressionMap = map[string]archives.Compression{
|
||||
"gz": archives.Gz{},
|
||||
"bz2": archives.Bz2{},
|
||||
@@ -20,6 +20,7 @@ var CompressionMap = map[string]archives.Compression{
|
||||
"br": archives.Brotli{},
|
||||
}
|
||||
|
||||
// maps to handle archival types
|
||||
var ArchivalMap = map[string]archives.Archival{
|
||||
"tar": archives.Tar{},
|
||||
"zip": archives.Zip{},
|
||||
@@ -31,31 +32,31 @@ func isExist(path string) bool {
|
||||
return !os.IsNotExist(statErr)
|
||||
}
|
||||
|
||||
// Archive is a function that archives the files in a directory
|
||||
// archives the files in a directory
|
||||
// dir: the directory to Archive
|
||||
// outfile: the output file
|
||||
// compression: the compression to use (gzip, bzip2, etc.)
|
||||
// archival: the archival to use (tar, zip, etc.)
|
||||
func Archive(ctx context.Context, dir, outfile string, compression archives.Compression, archival archives.Archival) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("Starting the archival process for directory: %s", dir)
|
||||
l.Debugf("starting the archival process for [%s]", dir)
|
||||
|
||||
// remove outfile
|
||||
l.Debugf("Removing any existing output file: %s", outfile)
|
||||
l.Debugf("removing existing output file: [%s]", outfile)
|
||||
if err := os.RemoveAll(outfile); err != nil {
|
||||
errMsg := fmt.Errorf("failed to remove existing output file '%s': %w", outfile, err)
|
||||
errMsg := fmt.Errorf("failed to remove existing output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
if !isExist(dir) {
|
||||
errMsg := fmt.Errorf("directory '%s' does not exist, cannot proceed with archival", dir)
|
||||
errMsg := fmt.Errorf("directory [%s] does not exist, cannot proceed with archival", dir)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
// map files on disk to their paths in the archive
|
||||
l.Debugf("Mapping files in directory: %s", dir)
|
||||
l.Debugf("mapping files in directory [%s]", dir)
|
||||
archiveDirName := filepath.Base(filepath.Clean(dir))
|
||||
if dir == "." {
|
||||
archiveDirName = ""
|
||||
@@ -64,40 +65,40 @@ func Archive(ctx context.Context, dir, outfile string, compression archives.Comp
|
||||
dir: archiveDirName,
|
||||
})
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error mapping files from directory '%s': %w", dir, err)
|
||||
errMsg := fmt.Errorf("error mapping files from directory [%s]: %w", dir, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("Successfully mapped files for directory: %s", dir)
|
||||
l.Debugf("successfully mapped files for directory [%s]", dir)
|
||||
|
||||
// create the output file we'll write to
|
||||
l.Debugf("Creating output file: %s", outfile)
|
||||
l.Debugf("creating output file [%s]", outfile)
|
||||
outf, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error creating output file '%s': %w", outfile, err)
|
||||
errMsg := fmt.Errorf("error creating output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
defer func() {
|
||||
l.Debugf("Closing output file: %s", outfile)
|
||||
l.Debugf("closing output file [%s]", outfile)
|
||||
outf.Close()
|
||||
}()
|
||||
|
||||
// define the archive format
|
||||
l.Debugf("Defining the archive format with compression: %T and archival: %T", compression, archival)
|
||||
l.Debugf("defining the archive format: [%T]/[%T]", archival, compression)
|
||||
format := archives.CompressedArchive{
|
||||
Compression: compression,
|
||||
Archival: archival,
|
||||
}
|
||||
|
||||
// create the archive
|
||||
l.Debugf("Starting archive creation: %s", outfile)
|
||||
l.Debugf("starting archive for [%s]", outfile)
|
||||
err = format.Archive(context.Background(), outf, files)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error during archive creation for output file '%s': %w", outfile, err)
|
||||
errMsg := fmt.Errorf("error during archive creation for output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("Archive created successfully: %s", outfile)
|
||||
l.Debugf("archive created successfully [%s]", outfile)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,14 +13,14 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
dirPermissions = 0o700 // Default directory permissions
|
||||
filePermissions = 0o600 // Default file permissions
|
||||
dirPermissions = 0o700 // default directory permissions
|
||||
filePermissions = 0o600 // default file permissions
|
||||
)
|
||||
|
||||
// securePath ensures the path is safely relative to the target directory.
|
||||
// ensures the path is safely relative to the target directory
|
||||
func securePath(basePath, relativePath string) (string, error) {
|
||||
relativePath = filepath.Clean("/" + relativePath) // Normalize path with a leading slash
|
||||
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator)) // Remove leading separator
|
||||
relativePath = filepath.Clean("/" + relativePath)
|
||||
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
|
||||
|
||||
dstPath := filepath.Join(basePath, relativePath)
|
||||
|
||||
@@ -30,110 +30,110 @@ func securePath(basePath, relativePath string) (string, error) {
|
||||
return dstPath, nil
|
||||
}
|
||||
|
||||
// createDirWithPermissions creates a directory with specified permissions.
|
||||
// creates a directory with specified permissions
|
||||
func createDirWithPermissions(ctx context.Context, path string, mode os.FileMode) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("Creating directory: %s", path)
|
||||
l.Debugf("creating directory [%s]", path)
|
||||
if err := os.MkdirAll(path, mode); err != nil {
|
||||
return fmt.Errorf("mkdir: %w", err)
|
||||
return fmt.Errorf("failed to mkdir: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setPermissions applies permissions to a file or directory.
|
||||
// sets permissions to a file or directory
|
||||
func setPermissions(path string, mode os.FileMode) error {
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return fmt.Errorf("chmod: %w", err)
|
||||
return fmt.Errorf("failed to chmod: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleFile handles the extraction of a file from the archive.
|
||||
// handles the extraction of a file from the archive.
|
||||
func handleFile(ctx context.Context, f archives.FileInfo, dst string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("Handling file: %s", f.NameInArchive)
|
||||
l.Debugf("handling file [%s]", f.NameInArchive)
|
||||
|
||||
// Validate and construct the destination path
|
||||
// validate and construct the destination path
|
||||
dstPath, pathErr := securePath(dst, f.NameInArchive)
|
||||
if pathErr != nil {
|
||||
return pathErr
|
||||
}
|
||||
|
||||
// Ensure the parent directory exists
|
||||
// ensure the parent directory exists
|
||||
parentDir := filepath.Dir(dstPath)
|
||||
if dirErr := createDirWithPermissions(ctx, parentDir, dirPermissions); dirErr != nil {
|
||||
return dirErr
|
||||
}
|
||||
|
||||
// Handle directories
|
||||
// handle directories
|
||||
if f.IsDir() {
|
||||
// Create the directory with permissions from the archive
|
||||
// create the directory with permissions from the archive
|
||||
if dirErr := createDirWithPermissions(ctx, dstPath, f.Mode()); dirErr != nil {
|
||||
return fmt.Errorf("creating directory: %w", dirErr)
|
||||
return fmt.Errorf("failed to create directory: %w", dirErr)
|
||||
}
|
||||
l.Debugf("Successfully created directory: %s", dstPath)
|
||||
l.Debugf("successfully created directory [%s]", dstPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ignore symlinks (or hardlinks)
|
||||
// ignore symlinks (or hardlinks)
|
||||
if f.LinkTarget != "" {
|
||||
l.Debugf("Skipping symlink: %s -> %s", dstPath, f.LinkTarget)
|
||||
l.Debugf("skipping symlink [%s] to [%s]", dstPath, f.LinkTarget)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check and handle parent directory permissions
|
||||
// check and handle parent directory permissions
|
||||
originalMode, statErr := os.Stat(parentDir)
|
||||
if statErr != nil {
|
||||
return fmt.Errorf("stat parent directory: %w", statErr)
|
||||
return fmt.Errorf("failed to stat parent directory: %w", statErr)
|
||||
}
|
||||
|
||||
// If parent directory is read-only, temporarily make it writable
|
||||
// if parent directory is read only, temporarily make it writable
|
||||
if originalMode.Mode().Perm()&0o200 == 0 {
|
||||
l.Debugf("Parent directory is read-only, temporarily making it writable: %s", parentDir)
|
||||
l.Debugf("parent directory is read only... temporarily making it writable [%s]", parentDir)
|
||||
if chmodErr := os.Chmod(parentDir, originalMode.Mode()|0o200); chmodErr != nil {
|
||||
return fmt.Errorf("chmod parent directory: %w", chmodErr)
|
||||
return fmt.Errorf("failed to chmod parent directory: %w", chmodErr)
|
||||
}
|
||||
defer func() {
|
||||
// Restore the original permissions after writing
|
||||
// restore the original permissions after writing
|
||||
if chmodErr := os.Chmod(parentDir, originalMode.Mode()); chmodErr != nil {
|
||||
l.Debugf("Failed to restore original permissions for %s: %v", parentDir, chmodErr)
|
||||
l.Debugf("failed to restore original permissions for [%s]: %v", parentDir, chmodErr)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Handle regular files
|
||||
// handle regular files
|
||||
reader, openErr := f.Open()
|
||||
if openErr != nil {
|
||||
return fmt.Errorf("open file: %w", openErr)
|
||||
return fmt.Errorf("failed to open file: %w", openErr)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
dstFile, createErr := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY, f.Mode())
|
||||
if createErr != nil {
|
||||
return fmt.Errorf("create file: %w", createErr)
|
||||
return fmt.Errorf("failed to create file: %w", createErr)
|
||||
}
|
||||
defer dstFile.Close()
|
||||
|
||||
if _, copyErr := io.Copy(dstFile, reader); copyErr != nil {
|
||||
return fmt.Errorf("copy: %w", copyErr)
|
||||
return fmt.Errorf("failed to copy: %w", copyErr)
|
||||
}
|
||||
l.Debugf("Successfully extracted file: %s", dstPath)
|
||||
l.Debugf("successfully extracted file [%s]", dstPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unarchive unarchives a tarball to a directory, symlinks and hardlinks are ignored.
|
||||
// unarchives a tarball to a directory, symlinks, and hardlinks are ignored
|
||||
func Unarchive(ctx context.Context, tarball, dst string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("Unarchiving %s to %s", tarball, dst)
|
||||
l.Debugf("unarchiving temporary archive [%s] to temporary store [%s]", tarball, dst)
|
||||
archiveFile, openErr := os.Open(tarball)
|
||||
if openErr != nil {
|
||||
return fmt.Errorf("open tarball %s: %w", tarball, openErr)
|
||||
return fmt.Errorf("failed to open tarball %s: %w", tarball, openErr)
|
||||
}
|
||||
defer archiveFile.Close()
|
||||
|
||||
format, input, identifyErr := archives.Identify(context.Background(), tarball, archiveFile)
|
||||
if identifyErr != nil {
|
||||
return fmt.Errorf("identify format: %w", identifyErr)
|
||||
return fmt.Errorf("failed to identify format: %w", identifyErr)
|
||||
}
|
||||
|
||||
extractor, ok := format.(archives.Extractor)
|
||||
@@ -142,7 +142,7 @@ func Unarchive(ctx context.Context, tarball, dst string) error {
|
||||
}
|
||||
|
||||
if dirErr := createDirWithPermissions(ctx, dst, dirPermissions); dirErr != nil {
|
||||
return fmt.Errorf("creating destination directory: %w", dirErr)
|
||||
return fmt.Errorf("failed to create destination directory: %w", dirErr)
|
||||
}
|
||||
|
||||
handler := func(ctx context.Context, f archives.FileInfo) error {
|
||||
@@ -150,9 +150,9 @@ func Unarchive(ctx context.Context, tarball, dst string) error {
|
||||
}
|
||||
|
||||
if extractErr := extractor.Extract(context.Background(), input, handler); extractErr != nil {
|
||||
return fmt.Errorf("extracting files: %w", extractErr)
|
||||
return fmt.Errorf("failed to extract: %w", extractErr)
|
||||
}
|
||||
|
||||
l.Debugf("Unarchiving completed successfully.")
|
||||
l.Infof("unarchiving completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
// interface guard
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/spf13/afero"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -2,7 +2,7 @@ package file
|
||||
|
||||
import (
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
type Option func(*File)
|
||||
|
||||
@@ -3,7 +3,7 @@ package chart
|
||||
import (
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
@@ -15,13 +15,13 @@ var _ artifacts.OCICollection = (*tchart)(nil)
|
||||
// tchart is a thick chart that includes all the dependent images as well as the chart itself
|
||||
type tchart struct {
|
||||
chart *chart.Chart
|
||||
config v1alpha1.ThickChart
|
||||
config v1.ThickChart
|
||||
|
||||
computed bool
|
||||
contents map[string]artifacts.OCI
|
||||
}
|
||||
|
||||
func NewThickChart(cfg v1alpha1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
|
||||
func NewThickChart(cfg v1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
|
||||
o, err := chart.NewChart(cfg.Chart.Name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/client-go/util/jsonpath"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
var defaultKnownImagePaths = []string{
|
||||
@@ -29,13 +29,13 @@ var defaultKnownImagePaths = []string{
|
||||
}
|
||||
|
||||
// ImagesInChart will render a chart and identify all dependent images from it
|
||||
func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
|
||||
func ImagesInChart(c *helmchart.Chart) (v1.Images, error) {
|
||||
docs, err := template(c)
|
||||
if err != nil {
|
||||
return v1alpha1.Images{}, err
|
||||
return v1.Images{}, err
|
||||
}
|
||||
|
||||
var images []v1alpha1.Image
|
||||
var images []v1.Image
|
||||
reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs)))
|
||||
for {
|
||||
raw, err := reader.Read()
|
||||
@@ -43,17 +43,17 @@ func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return v1alpha1.Images{}, err
|
||||
return v1.Images{}, err
|
||||
}
|
||||
|
||||
found := find(raw, defaultKnownImagePaths...)
|
||||
for _, f := range found {
|
||||
images = append(images, v1alpha1.Image{Name: f})
|
||||
images = append(images, v1.Image{Name: f})
|
||||
}
|
||||
}
|
||||
|
||||
ims := v1alpha1.Images{
|
||||
Spec: v1alpha1.ImageSpec{
|
||||
ims := v1.Images{
|
||||
Spec: v1.ImageSpec{
|
||||
Images: images,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
|
||||
artifact "hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
|
||||
@@ -42,12 +42,22 @@ const (
|
||||
HaulerVendorPrefix = "vnd.hauler"
|
||||
|
||||
// annotation keys
|
||||
ContainerdImageNameKey = "io.containerd.image.name"
|
||||
KindAnnotationName = "kind"
|
||||
KindAnnotationImage = "dev.cosignproject.cosign/image"
|
||||
KindAnnotationIndex = "dev.cosignproject.cosign/imageIndex"
|
||||
ImageAnnotationKey = "hauler.dev/key"
|
||||
ImageAnnotationPlatform = "hauler.dev/platform"
|
||||
ImageAnnotationRegistry = "hauler.dev/registry"
|
||||
ImageAnnotationTlog = "hauler.dev/use-tlog-verify"
|
||||
ImageRefKey = "org.opencontainers.image.ref.name"
|
||||
|
||||
// cosign keyless validation options
|
||||
ImageAnnotationCertIdentity = "hauler.dev/certificate-identity"
|
||||
ImageAnnotationCertIdentityRegexp = "hauler.dev/certificate-identity-regexp"
|
||||
ImageAnnotationCertOidcIssuer = "hauler.dev/certificate-oidc-issuer"
|
||||
ImageAnnotationCertOidcIssuerRegexp = "hauler.dev/certificate-oidc-issuer-regexp"
|
||||
ImageAnnotationCertGithubWorkflowRepository = "hauler.dev/certificate-github-workflow-repository"
|
||||
|
||||
// content kinds
|
||||
ImagesContentKind = "Images"
|
||||
@@ -68,27 +78,24 @@ const (
|
||||
HaulerIgnoreErrors = "HAULER_IGNORE_ERRORS"
|
||||
|
||||
// container files and directories
|
||||
OCIImageIndexFile = "index.json"
|
||||
OCIImageLayoutFile = "oci-layout"
|
||||
OCIImageBlobsDir = "blobs"
|
||||
ImageManifestFile = "manifest.json"
|
||||
ImageConfigFile = "config.json"
|
||||
ImageManifestFile = "manifest.json"
|
||||
ImageConfigFile = "config.json"
|
||||
|
||||
// other constraints
|
||||
CarbideRegistry = "rgcrprod.azurecr.us"
|
||||
APIVersion = "v1alpha1"
|
||||
DefaultNamespace = "hauler"
|
||||
DefaultTag = "latest"
|
||||
DefaultStoreName = "store"
|
||||
DefaultHaulerDirName = ".hauler"
|
||||
DefaultHaulerTempDirName = "hauler"
|
||||
DefaultRegistryRootDir = "registry"
|
||||
DefaultRegistryPort = 5000
|
||||
DefaultFileserverRootDir = "fileserver"
|
||||
DefaultFileserverPort = 8080
|
||||
DefaultFileserverTimeout = 60
|
||||
DefaultHaulArchiveName = "haul.tar.zst"
|
||||
DefaultRetries = 3
|
||||
RetriesInterval = 5
|
||||
CustomTimeFormat = "2006-01-02 15:04:05"
|
||||
CarbideRegistry = "rgcrprod.azurecr.us"
|
||||
DefaultNamespace = "hauler"
|
||||
DefaultTag = "latest"
|
||||
DefaultStoreName = "store"
|
||||
DefaultHaulerDirName = ".hauler"
|
||||
DefaultHaulerTempDirName = "hauler"
|
||||
DefaultRegistryRootDir = "registry"
|
||||
DefaultRegistryPort = 5000
|
||||
DefaultFileserverRootDir = "fileserver"
|
||||
DefaultFileserverPort = 8080
|
||||
DefaultFileserverTimeout = 60
|
||||
DefaultHaulerArchiveName = "haul.tar.zst"
|
||||
DefaultHaulerManifestName = "hauler-manifest.yaml"
|
||||
DefaultRetries = 3
|
||||
RetriesInterval = 5
|
||||
CustomTimeFormat = "2006-01-02 15:04:05"
|
||||
)
|
||||
|
||||
@@ -40,7 +40,7 @@ type Chart struct {
    annotations map[string]string
}

// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on v1alpha1.Chart contents
// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on chart contents
func NewChart(name string, opts *action.ChartPathOptions) (*Chart, error) {
    chartRef := name
    actionConfig := new(action.Configuration)
@@ -7,17 +7,31 @@ import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/yaml"

    "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
    v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
    v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)

func Load(data []byte) (schema.ObjectKind, error) {
    var tm metav1.TypeMeta
    if err := yaml.Unmarshal(data, &tm); err != nil {
        return nil, err
        return nil, fmt.Errorf("failed to parse manifest: %w", err)
    }

    if tm.GroupVersionKind().GroupVersion() != v1alpha1.ContentGroupVersion && tm.GroupVersionKind().GroupVersion() != v1alpha1.CollectionGroupVersion {
        return nil, fmt.Errorf("unrecognized content/collection type: %s", tm.GroupVersionKind().String())
    if tm.APIVersion == "" {
        return nil, fmt.Errorf("missing required manifest field [apiVersion]")
    }

    if tm.Kind == "" {
        return nil, fmt.Errorf("missing required manifest field [kind]")
    }

    gv := tm.GroupVersionKind().GroupVersion()
    // allow v1 and v1alpha1 content/collection
    if gv != v1.ContentGroupVersion &&
        gv != v1.CollectionGroupVersion &&
        gv != v1alpha1.ContentGroupVersion &&
        gv != v1alpha1.CollectionGroupVersion {
        return nil, fmt.Errorf("unrecognized content/collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind)
    }

    return &tm, nil
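A minimal caller sketch (not part of the diff) for the reworked Load: decode one YAML document, then branch on the returned GroupVersionKind. The detectKind helper and its switch cases are assumptions for illustration; only Load and its error behaviour come from the source above.

// detectKind is a hypothetical caller of Load for a single YAML document.
func detectKind(doc []byte) error {
    kindInfo, err := Load(doc)
    if err != nil {
        return err // missing apiVersion/kind or an unrecognized group/version
    }
    gvk := kindInfo.GroupVersionKind()
    switch gvk.Kind {
    case "Images", "Charts", "Files":
        fmt.Printf("recognized %s content [%s]\n", gvk.GroupVersion(), gvk.Kind)
    default:
        fmt.Printf("treating [%s] as a collection or other kind\n", gvk.Kind)
    }
    return nil
}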
@@ -66,7 +66,7 @@ func (o *OCI) AddIndex(desc ocispec.Descriptor) error {
|
||||
|
||||
// LoadIndex will load the index from disk
|
||||
func (o *OCI) LoadIndex() error {
|
||||
path := o.path(consts.OCIImageIndexFile)
|
||||
path := o.path(ocispec.ImageIndexFile)
|
||||
idx, err := os.Open(path)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
@@ -138,7 +138,7 @@ func (o *OCI) SaveIndex() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(o.path(consts.OCIImageIndexFile), data, 0644)
|
||||
return os.WriteFile(o.path(ocispec.ImageIndexFile), data, 0644)
|
||||
}
|
||||
|
||||
// Resolve attempts to resolve the reference into a name and descriptor.
|
||||
@@ -229,7 +229,7 @@ func (o *OCI) Walk(fn func(reference string, desc ocispec.Descriptor) error) err
|
||||
return true
|
||||
})
|
||||
if errst != nil {
|
||||
return fmt.Errorf(strings.Join(errst, "; "))
|
||||
return fmt.Errorf("%s", strings.Join(errst, "; "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -259,7 +259,7 @@ func (o *OCI) blobWriterAt(desc ocispec.Descriptor) (*os.File, error) {
|
||||
}
|
||||
|
||||
func (o *OCI) ensureBlob(alg string, hex string) (string, error) {
|
||||
dir := o.path(consts.OCIImageBlobsDir, alg)
|
||||
dir := o.path(ocispec.ImageBlobsDir, alg)
|
||||
if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -3,25 +3,72 @@ package cosign
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/verify"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sigstore/cosign/v3/cmd/cosign/cli"
|
||||
"github.com/sigstore/cosign/v3/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v3/cmd/cosign/cli/verify"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// VerifyFileSignature verifies the digital signature of a file using Sigstore/Cosign.
|
||||
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
// VerifySignature verifies the digital signature of a file using Sigstore/Cosign.
|
||||
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
operation := func() error {
|
||||
v := &verify.VerifyCommand{
|
||||
KeyRef: keyPath,
|
||||
KeyRef: keyPath,
|
||||
IgnoreTlog: true, // Ignore transparency log by default.
|
||||
NewBundleFormat: true,
|
||||
}
|
||||
|
||||
// if the user wants to use the transparency log, set the flag to false
|
||||
if useTlog {
|
||||
v.IgnoreTlog = false
|
||||
}
|
||||
|
||||
err := log.CaptureOutput(l, true, func() error {
|
||||
return v.Exec(ctx, []string{ref})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return RetryOperation(ctx, rso, ro, operation)
|
||||
}
|
||||
|
||||
// VerifyKeylessSignature verifies the digital signature of a file using Sigstore/Cosign.
|
||||
func VerifyKeylessSignature(ctx context.Context, s *store.Layout, identity string, identityRegexp string, oidcIssuer string, oidcIssuerRegexp string, ghWorkflowRepository string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
operation := func() error {
|
||||
|
||||
certVerifyOptions := options.CertVerifyOptions{
|
||||
CertOidcIssuer: oidcIssuer,
|
||||
CertOidcIssuerRegexp: oidcIssuerRegexp,
|
||||
CertIdentity: identity,
|
||||
CertIdentityRegexp: identityRegexp,
|
||||
CertGithubWorkflowRepository: ghWorkflowRepository,
|
||||
}
|
||||
|
||||
v := &verify.VerifyCommand{
|
||||
CertVerifyOptions: certVerifyOptions,
|
||||
IgnoreTlog: false, // Ignore transparency log is set to false by default for keyless signature verification
|
||||
CertGithubWorkflowRepository: ghWorkflowRepository,
|
||||
NewBundleFormat: true,
|
||||
}
|
||||
|
||||
// if the user wants to use the transparency log, set the flag to false
|
||||
if useTlog {
|
||||
v.IgnoreTlog = false
|
||||
}
|
||||
|
||||
err := log.CaptureOutput(l, true, func() error {
|
||||
@@ -79,7 +126,7 @@ func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string
|
||||
}
|
||||
|
||||
// LoadImage loads store to a remote registry.
|
||||
func LoadImages(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions, ro *flags.CliRootOpts) error {
|
||||
func LoadImages(ctx context.Context, s *store.Layout, registry string, only string, ropts content.RegistryOptions, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
o := &options.LoadOptions{
|
||||
@@ -89,10 +136,15 @@ func LoadImages(ctx context.Context, s *store.Layout, registry string, ropts con
|
||||
},
|
||||
}
|
||||
|
||||
// Conditionally add extra registry flags.
|
||||
// Conditionally add extra flags.
|
||||
if len(only) > 0 {
|
||||
o.LoadOnly = only
|
||||
}
|
||||
|
||||
if ropts.Insecure {
|
||||
o.Registry.AllowInsecure = true
|
||||
}
|
||||
|
||||
if ropts.PlainHTTP {
|
||||
o.Registry.AllowHTTPRegistry = true
|
||||
}
|
||||
@@ -137,9 +189,17 @@ func RetryOperation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.Cli
|
||||
}
|
||||
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("warning (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") {
|
||||
l.Warnf("warning (attempt %d/%d)... failed tlog verification", attempt, rso.Retries)
|
||||
} else {
|
||||
l.Warnf("warning (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
}
|
||||
} else {
|
||||
l.Errorf("error (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") {
|
||||
l.Errorf("error (attempt %d/%d)... failed tlog verification", attempt, rso.Retries)
|
||||
} else {
|
||||
l.Errorf("error (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
}
|
||||
}
|
||||
|
||||
// If this is not the last attempt, wait before retrying
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
func TestClient_Detect(t *testing.T) {
|
||||
@@ -151,17 +151,17 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC
|
||||
// This can be a highly destructive operation if the store's directory happens to be inline with other non-store contents
|
||||
// To reduce the blast radius and likelihood of deleting things we don't own, Flush explicitly deletes oci-layout content only
|
||||
func (l *Layout) Flush(ctx context.Context) error {
|
||||
blobs := filepath.Join(l.Root, consts.OCIImageBlobsDir)
|
||||
blobs := filepath.Join(l.Root, ocispec.ImageBlobsDir)
|
||||
if err := os.RemoveAll(blobs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
index := filepath.Join(l.Root, consts.OCIImageIndexFile)
|
||||
index := filepath.Join(l.Root, ocispec.ImageIndexFile)
|
||||
if err := os.RemoveAll(index); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
layout := filepath.Join(l.Root, consts.OCIImageLayoutFile)
|
||||
layout := filepath.Join(l.Root, ocispec.ImageLayoutFile)
|
||||
if err := os.RemoveAll(layout); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -240,7 +240,7 @@ func (l *Layout) writeLayer(layer v1.Layer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dir := filepath.Join(l.Root, consts.OCIImageBlobsDir, d.Algorithm)
|
||||
dir := filepath.Join(l.Root, ocispec.ImageBlobsDir, d.Algorithm)
|
||||
if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
53
testdata/hauler-manifest-pipeline.yaml
vendored
@@ -1,3 +1,54 @@
# v1 manifests
apiVersion: content.hauler.cattle.io/v1
kind: Images
metadata:
  name: hauler-content-images-example
spec:
  images:
    - name: busybox
    - name: busybox:stable
      platform: linux/amd64
    - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
---
apiVersion: content.hauler.cattle.io/v1
kind: Charts
metadata:
  name: hauler-content-charts-example
spec:
  charts:
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.4
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.3
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.6
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.4
    - name: rancher-cluster-templates-0.5.2.tgz
      repoURL: testdata
---
apiVersion: content.hauler.cattle.io/v1
kind: Files
metadata:
  name: hauler-content-files-example
spec:
  files:
    - path: https://get.rke2.io/install.sh
    - path: https://get.rke2.io/install.sh
      name: rke2-install.sh
    - path: testdata/hauler-manifest.yaml
    - path: testdata/hauler-manifest.yaml
      name: hauler-manifest-local.yaml
---
# v1alpha1 manifests
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Images
metadata:
@@ -32,7 +83,7 @@ spec:
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.4
    - name: rancher-cluster-templates-0.5.2.tgz
      repoURL: .
      repoURL: testdata
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Files
35
testdata/hauler-manifest.yaml
vendored
@@ -1,3 +1,38 @@
# v1 manifests
apiVersion: content.hauler.cattle.io/v1
kind: Images
metadata:
  name: hauler-content-images-example
spec:
  images:
    - name: busybox
    - name: busybox:stable
      platform: linux/amd64
    - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
---
apiVersion: content.hauler.cattle.io/v1
kind: Charts
metadata:
  name: hauler-content-charts-example
spec:
  charts:
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.5
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
---
apiVersion: content.hauler.cattle.io/v1
kind: Files
metadata:
  name: hauler-content-files-example
spec:
  files:
    - path: https://get.rke2.io
      name: install.sh
    - path: testdata/hauler-manifest.yaml
---
# v1alpha1 manifests
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Images
metadata: