mirror of https://github.com/hauler-dev/hauler.git
synced 2026-02-19 20:40:18 +00:00

Compare commits (1 commit)

Commit: e42f72f4b8

.dockerignore (new file, 8 lines)
@@ -0,0 +1,8 @@
*
!cmd
!go.mod
!go.sum
!internal
!Makefile
!pkg
!static

.github/ISSUE_TEMPLATE/feature_request.md (vendored, 2 lines changed)
@@ -8,7 +8,7 @@ assignees: ''

<!-- Thank you for helping us to improve Hauler! We welcome all requests for enhancements (RFEs). Please fill out each area of the template so we can better assist you. Comments like this will be hidden when you submit, but you can delete them if you wish. -->

**Is this Feature/Enhancement related to an Existing Problem? If so, please describe:**
**Is this RFE related to an Existing Problem? If so, please describe:**

<!-- Provide a clear and concise description of the problem -->

.github/workflows/pages.yaml (vendored, deleted, 47 lines)
@@ -1,47 +0,0 @@
name: Pages Workflow

on:
  workflow_dispatch:
  push:
    branches:
      - main

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  deploy-pages:
    name: Deploy GitHub Pages
    runs-on: ubuntu-latest
    timeout-minutes: 30
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Setup Pages
        uses: actions/configure-pages@v5

      - name: Upload Pages Artifacts
        uses: actions/upload-pages-artifact@v3
        with:
          path: './static'

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

.github/workflows/release.yaml (vendored, 61 lines changed)
@@ -7,10 +7,10 @@ on:
      - '*'

jobs:
  goreleaser:
    name: GoReleaser Job
  go-release:
    name: Go Release Job
    runs-on: ubuntu-latest
    timeout-minutes: 60
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -25,8 +25,34 @@ jobs:
      - name: Set Up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true
          go-version: 1.21.x

      - name: Run Go Releaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: "~> v2"
          args: "release --clean -p 1"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}

  container-release:
    name: Container Release Job
    runs-on: ubuntu-latest
    needs: [go-release]
    timeout-minutes: 30
    continue-on-error: true
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Set Up QEMU
        uses: docker/setup-qemu-action@v3
@@ -48,13 +74,20 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
      - name: Build and Push Release Container to GitHub Container Registry
        uses: docker/build-push-action@v5
        with:
          distribution: goreleaser
          version: "~> v2"
          args: "release --clean --timeout 60m"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
          DOCKER_CLI_EXPERIMENTAL: "enabled"
          context: .
          target: release
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ghcr.io/${{ github.repository }}:${{ github.ref_name }}, docker.io/hauler/hauler:${{ github.ref_name }}

      - name: Build and Push Debug Container to GitHub Container Registry
        uses: docker/build-push-action@v5
        with:
          context: .
          target: debug
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ghcr.io/${{ github.repository }}-debug:${{ github.ref_name }}, docker.io/hauler/hauler-debug:${{ github.ref_name }}

.github/workflows/tests.yaml (vendored, deleted, 337 lines)
@@ -1,337 +0,0 @@
name: Tests Workflow

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  unit-tests:
    name: Unit Tests
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Set Up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true

      - name: Install Go Releaser
        uses: goreleaser/goreleaser-action@v6
        with:
          install-only: true

      - name: Install Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y make
          sudo apt-get install -y build-essential

      - name: Run Makefile Targets
        run: |
          make build-all

      - name: Upload Hauler Binaries
        uses: actions/upload-artifact@v4
        with:
          name: hauler-binaries
          path: dist/*

      - name: Upload Coverage Report
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: coverage.out

  integration-tests:
    name: Integration Tests
    runs-on: ubuntu-latest
    needs: [unit-tests]
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Install Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y unzip
          sudo apt-get install -y tree

      - name: Download Artifacts
        uses: actions/download-artifact@v4
        with:
          name: hauler-binaries
          path: dist

      - name: Prepare Hauler for Tests
        run: |
          pwd
          ls -la
          ls -la dist/
          chmod -R 755 dist/ testdata/certificate-script.sh
          sudo mv dist/hauler_linux_amd64_v1/hauler /usr/local/bin/hauler
          ./testdata/certificate-script.sh && sudo chown -R $(whoami) testdata/certs/

      - name: Verify - hauler version
        run: |
          hauler version

      - name: Verify - hauler completion
        run: |
          hauler completion
          hauler completion bash
          hauler completion fish
          hauler completion powershell
          hauler completion zsh

      - name: Verify - hauler help
        run: |
          hauler help

      - name: Verify - hauler login
        run: |
          hauler login --help
          hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
          echo ${{ secrets.GITHUB_TOKEN }} | hauler login ghcr.io -u ${{ github.repository_owner }} --password-stdin

      - name: Remove Hauler Store Credentials
        run: |
          rm -rf ~/.docker/config.json

      - name: Verify - hauler store
        run: |
          hauler store --help

      - name: Verify - hauler store add
        run: |
          hauler store add --help

      - name: Verify - hauler store add chart
        run: |
          hauler store add chart --help
          # verify via helm repository
          hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable
          hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4
          hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.3 --verify
          # verify via oci helm repository
          hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev
          hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.0.6
          hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.0.4 --verify
          # verify via local helm repository
          curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.2/rancher-cluster-templates-0.5.2.tgz
          hauler store add chart rancher-cluster-templates-0.5.2.tgz --repo .
          curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.1/rancher-cluster-templates-0.5.1.tgz
          hauler store add chart rancher-cluster-templates-0.5.1.tgz --repo . --version 0.5.1
          curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.0/rancher-cluster-templates-0.5.0.tgz
          hauler store add chart rancher-cluster-templates-0.5.0.tgz --repo . --version 0.5.0 --verify
          # verify via the hauler store contents
          hauler store info

      - name: Verify - hauler store add file
        run: |
          hauler store add file --help
          # verify via remote file
          hauler store add file https://get.rke2.io/install.sh
          hauler store add file https://get.rke2.io/install.sh --name rke2-install.sh
          # verify via local file
          hauler store add file testdata/hauler-manifest.yaml
          hauler store add file testdata/hauler-manifest.yaml --name hauler-manifest-local.yaml
          # verify via the hauler store contents
          hauler store info

      - name: Verify - hauler store add image
        run: |
          hauler store add image --help
          # verify via image reference
          hauler store add image busybox
          # verify via image reference with version and platform
          hauler store add image busybox:stable --platform linux/amd64
          # verify via image reference with full reference
          hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
          # verify via the hauler store contents
          hauler store info

      - name: Verify - hauler store copy
        run: |
          hauler store copy --help
          # need more tests here

      - name: Verify - hauler store extract
        run: |
          hauler store extract --help
          # verify via extracting hauler store content
          hauler store extract hauler/hauler-manifest-local.yaml:latest
          # view extracted content from store
          cat hauler-manifest-local.yaml

      - name: Verify - hauler store info
        run: |
          hauler store info --help
          # verify via table output
          hauler store info --output table
          # verify via json output
          hauler store info --output json
          # verify via filtered output (chart)
          hauler store info --type chart
          # verify via filtered output (file)
          hauler store info --type file
          # verify via filtered output (image)
          hauler store info --type image
          # verify store directory structure
          tree -hC store

      - name: Verify - hauler store save
        run: |
          hauler store save --help
          # verify via save
          hauler store save
          # verify via save with filename
          hauler store save --filename store.tar.zst
          # verify via save with filename and platform (amd64)
          hauler store save --filename store-amd64.tar.zst --platform linux/amd64

      - name: Remove Hauler Store Contents
        run: |
          rm -rf store
          hauler store info

      - name: Verify - hauler store load
        run: |
          hauler store load --help
          # verify via load
          hauler store load
          # verify via load with multiple files
          hauler store load --filename haul.tar.zst --filename store.tar.zst
          # verify via load with filename and temp directory
          hauler store load --filename store.tar.zst --tempdir /opt
          # verify via load with filename and platform (amd64)
          hauler store load --filename store-amd64.tar.zst

      - name: Verify Hauler Store Contents
        run: |
          # verify store
          hauler store info
          # verify store directory structure
          tree -hC store

      - name: Verify - docker load
        run: |
          docker load --help
          # verify via load
          docker load --input store-amd64.tar.zst

      - name: Verify Docker Images Contents
        run: |
          docker images --help
          # verify images
          docker images --all

      - name: Remove Hauler Store Contents
        run: |
          rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst
          hauler store info

      - name: Verify - hauler store sync
        run: |
          hauler store sync --help
          # download local helm repository
          curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.2/rancher-cluster-templates-0.5.2.tgz
          # verify via sync
          hauler store sync --filename testdata/hauler-manifest-pipeline.yaml
          # verify via sync with multiple files
          hauler store sync --filename testdata/hauler-manifest-pipeline.yaml --filename testdata/hauler-manifest.yaml
          # need more tests here

      - name: Verify - hauler store serve
        run: |
          hauler store serve --help

      - name: Verify - hauler store serve registry
        run: |
          hauler store serve registry --help
          # verify via registry
          hauler store serve registry &
          until curl -sf http://localhost:5000/v2/_catalog; do : ; done
          pkill -f "hauler store serve registry"
          # verify via registry with different port
          hauler store serve registry --port 5001 &
          until curl -sf http://localhost:5001/v2/_catalog; do : ; done
          pkill -f "hauler store serve registry --port 5001"
          # verify via registry with different port and readonly
          hauler store serve registry --port 5001 --readonly &
          until curl -sf http://localhost:5001/v2/_catalog; do : ; done
          pkill -f "hauler store serve registry --port 5001 --readonly"
          # verify via registry with different port with readonly with tls
          # hauler store serve registry --port 5001 --readonly --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key &
          # until curl -sf --cacert testdata/certs/cacerts.pem https://localhost:5001/v2/_catalog; do : ; done
          # pkill -f "hauler store serve registry --port 5001 --readonly --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key"

      - name: Verify - hauler store serve fileserver
        run: |
          hauler store serve fileserver --help
          # verify via fileserver
          hauler store serve fileserver &
          until curl -sf http://localhost:8080; do : ; done
          pkill -f "hauler store serve fileserver"
          # verify via fileserver with different port
          hauler store serve fileserver --port 8000 &
          until curl -sf http://localhost:8000; do : ; done
          pkill -f "hauler store serve fileserver --port 8000"
          # verify via fileserver with different port and timeout
          hauler store serve fileserver --port 8000 --timeout 120 &
          until curl -sf http://localhost:8000; do : ; done
          pkill -f "hauler store serve fileserver --port 8000 --timeout 120"
          # verify via fileserver with different port with timeout and tls
          # hauler store serve fileserver --port 8000 --timeout 120 --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key &
          # until curl -sf --cacert testdata/certs/cacerts.pem https://localhost:8000; do : ; done
          # pkill -f "hauler store serve fileserver --port 8000 --timeout 120 --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key"

      - name: Verify Hauler Store Contents
        run: |
          # verify store
          hauler store info
          # verify store directory structure
          tree -hC store
          # verify registry directory structure
          tree -hC registry
          # verify fileserver directory structure
          tree -hC fileserver

      - name: Create Hauler Report
        run: |
          hauler version >> hauler-report.txt
          hauler store info --output table >> hauler-report.txt

      - name: Remove Hauler Store Contents
        run: |
          rm -rf store registry fileserver
          hauler store info

      - name: Upload Hauler Report
        uses: actions/upload-artifact@v4
        with:
          name: hauler-report
          path: hauler-report.txt

.github/workflows/unittest.yaml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
name: Unit Test Workflow

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  unit-test:
    name: Unit Tests
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Set Up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.21.x

      - name: Run Unit Tests
        run: |
          mkdir -p cmd/hauler/binaries
          touch cmd/hauler/binaries/dummy.txt
          go test -race -covermode=atomic -coverprofile=coverage.out ./...

      - name: Upload Coverage Report
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: coverage.out

.gitignore (vendored, 9 lines changed)
@@ -1,3 +1,4 @@
.DS_Store
**/.DS_Store
.idea
.vscode
@@ -7,12 +8,12 @@
*.sln
*.sw?
*.dir-locals.el
artifacts
local-artifacts
airgap-scp.sh
dist/
tmp/
bin/
/store/
registry/
fileserver/
/registry/
cmd/hauler/binaries
testdata/certs/
coverage.out

.goreleaser.yaml
@@ -5,20 +5,18 @@ before:
  hooks:
    - go mod tidy
    - go mod download
    - go fmt ./...
    - go vet ./...
    - go test ./... -cover -race -covermode=atomic -coverprofile=coverage.out
    - rm -rf cmd/hauler/binaries

release:
  prerelease: auto
  make_latest: false

env:
  - vpkg=hauler.dev/go/hauler/internal/version
  - cosign_version=v2.2.3+carbide.3
  - vpkg=github.com/rancherfederal/hauler/internal/version
  - cosign_version=v2.2.3+carbide.2

builds:
  - dir: ./cmd/hauler/.
  - main: cmd/hauler/main.go
    goos:
      - linux
      - darwin
@@ -28,9 +26,14 @@ builds:
      - arm64
    ldflags:
      - -s -w -X {{ .Env.vpkg }}.gitVersion={{ .Version }} -X {{ .Env.vpkg }}.gitCommit={{ .ShortCommit }} -X {{ .Env.vpkg }}.gitTreeState={{if .IsGitDirty}}dirty{{else}}clean{{end}} -X {{ .Env.vpkg }}.buildDate={{ .Date }}
    hooks:
      pre:
        - mkdir -p cmd/hauler/binaries
        - wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/{{ .Env.cosign_version }}/cosign-{{ .Os }}-{{ .Arch }}{{ if eq .Os "windows" }}.exe{{ end }}
      post:
        - rm -rf cmd/hauler/binaries
    env:
      - CGO_ENABLED=0
      - GOEXPERIMENT=boringcrypto

universal_binaries:
  - replace: false
@@ -47,75 +50,3 @@ brews:
    token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
    directory: Formula
    description: "Hauler CLI"

dockers:
  - id: hauler-amd64
    goos: linux
    goarch: amd64
    use: buildx
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/amd64"
      - "--target=release"
    image_templates:
      - "docker.io/hauler/hauler-amd64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
  - id: hauler-arm64
    goos: linux
    goarch: arm64
    use: buildx
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/arm64"
      - "--target=release"
    image_templates:
      - "docker.io/hauler/hauler-arm64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
  - id: hauler-debug-amd64
    goos: linux
    goarch: amd64
    use: buildx
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/amd64"
      - "--target=debug"
    image_templates:
      - "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
  - id: hauler-debug-arm64
    goos: linux
    goarch: arm64
    use: buildx
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/arm64"
      - "--target=debug"
    image_templates:
      - "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"

docker_manifests:
  - id: hauler-docker
    use: docker
    name_template: "docker.io/hauler/hauler:{{ .Version }}"
    image_templates:
      - "docker.io/hauler/hauler-amd64:{{ .Version }}"
      - "docker.io/hauler/hauler-arm64:{{ .Version }}"
  - id: hauler-ghcr
    use: docker
    name_template: "ghcr.io/hauler-dev/hauler:{{ .Version }}"
    image_templates:
      - "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
  - id: hauler-debug-docker
    use: docker
    name_template: "docker.io/hauler/hauler-debug:{{ .Version }}"
    image_templates:
      - "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
      - "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
  - id: hauler-debug-ghcr
    use: docker
    name_template: "ghcr.io/hauler-dev/hauler-debug:{{ .Version }}"
    image_templates:
      - "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
      - "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"

Dockerfile (13 lines changed)
@@ -1,8 +1,11 @@
# builder stage
FROM registry.suse.com/bci/bci-base:15.5 AS builder
FROM registry.suse.com/bci/golang:1.21 AS builder

# fetched from goreleaser build proccess
COPY hauler /hauler
RUN zypper --non-interactive install make bash wget ca-certificates

COPY . /build
WORKDIR /build
RUN make build

RUN echo "hauler:x:1001:1001::/home/hauler:" > /etc/passwd \
    && echo "hauler:x:1001:hauler" > /etc/group \
@@ -22,7 +25,7 @@ COPY --from=builder --chown=hauler:hauler /tmp/. /tmp
COPY --from=builder --chown=hauler:hauler /store/. /store
COPY --from=builder --chown=hauler:hauler /registry/. /registry
COPY --from=builder --chown=hauler:hauler /fileserver/. /fileserver
COPY --from=builder --chown=hauler:hauler /hauler /hauler
COPY --from=builder --chown=hauler:hauler /build/bin/hauler /

USER hauler
ENTRYPOINT [ "/hauler" ]
@@ -34,7 +37,7 @@ COPY --from=builder /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-cer
COPY --from=builder /etc/passwd /etc/passwd
COPY --from=builder /etc/group /etc/group
COPY --from=builder --chown=hauler:hauler /home/hauler/. /home/hauler
COPY --from=builder --chown=hauler:hauler /hauler /usr/local/bin/hauler
COPY --from=builder --chown=hauler:hauler /build/bin/hauler /bin/hauler

RUN apk --no-cache add curl
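
For local experimentation, the image can be built per target with plain Docker. This is a rough sketch: the release and debug target names come from the GoReleaser and workflow configuration elsewhere in this diff, the tags are placeholders, and depending on which variant of the Dockerfile is in effect, the build context must either already contain a prebuilt hauler binary (the GoReleaser flow) or be buildable with make inside the builder stage.

    # build the release-flavored image for the current platform (tag is arbitrary)
    docker build --target release -t hauler:local .

    # or build the debug-flavored image
    docker build --target debug -t hauler:local-debug .

    # the entrypoint is the hauler CLI itself
    docker run --rm hauler:local version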

Makefile (60 lines changed)
@@ -1,49 +1,39 @@
# Makefile for hauler
SHELL:=/bin/bash
GO_FILES=$(shell go list ./... | grep -v /vendor/)

# set shell
SHELL=/bin/bash
COSIGN_VERSION=v2.2.3+carbide.2

# set go variables
GO_FILES=./...
GO_COVERPROFILE=coverage.out
.SILENT:

# set build variables
BIN_DIRECTORY=bin
DIST_DIRECTORY=dist
all: fmt vet install test

# local build of hauler for current platform
# references/configuration from .goreleaser.yaml
build:
    goreleaser build --clean --snapshot --timeout 60m --single-target
    rm -rf cmd/hauler/binaries;\
    mkdir -p cmd/hauler/binaries;\
    wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/$(COSIGN_VERSION)/cosign-$(shell go env GOOS)-$(shell go env GOARCH);\
    mkdir bin;\
    CGO_ENABLED=0 go build -o bin ./cmd/...;\

# local build of hauler for all platforms
# references/configuration from .goreleaser.yaml
build-all:
    goreleaser build --clean --snapshot --timeout 60m
build-all: fmt vet
    goreleaser build --clean --snapshot

# local release of hauler for all platforms
# references/configuration from .goreleaser.yaml
release:
    goreleaser release --clean --snapshot --timeout 60m

# install depedencies
install:
    go mod tidy
    go mod download
    CGO_ENABLED=0 go install ./cmd/...
    rm -rf cmd/hauler/binaries;\
    mkdir -p cmd/hauler/binaries;\
    wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/$(COSIGN_VERSION)/cosign-$(shell go env GOOS)-$(shell go env GOARCH);\
    CGO_ENABLED=0 go install ./cmd/...;\

# format go code
fmt:
    go fmt $(GO_FILES)

# vet go code
vet:
    go vet $(GO_FILES)

# test go code
test:
    go test $(GO_FILES) -cover -race -covermode=atomic -coverprofile=$(GO_COVERPROFILE)
fmt:
    go fmt $(GO_FILES)

test:
    go test $(GO_FILES) -cover

integration_test:
    go test -tags=integration $(GO_FILES)

# cleanup artifacts
clean:
    rm -rf $(BIN_DIRECTORY) $(DIST_DIRECTORY) $(GO_COVERPROFILE)
    rm -rf bin 2> /dev/null
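
A minimal local developer loop implied by these targets, assuming GNU make, a Go toolchain, and (for the build targets) goreleaser are installed; the output path depends on which variant of the build recipe above is in effect:

    # format, vet, and run the unit tests
    make fmt vet test

    # build hauler for the current platform (artifacts land in bin/ or dist/ depending on the recipe variant)
    make build

    # sanity-check the freshly built binary
    ./bin/hauler version

    # remove build artifacts and coverage output
    make clean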

README.md (23 lines changed)
@@ -4,33 +4,14 @@

## Airgap Swiss Army Knife

> ⚠️ **Please Note:** Hauler and the Hauler Documentation are recently Generally Available (GA).

`Rancher Government Hauler` simplifies the airgap experience without requiring operators to adopt a specific workflow. **Hauler** simplifies the airgapping process, by representing assets (images, charts, files, etc...) as content and collections to allow operators to easily fetch, store, package, and distribute these assets with declarative manifests or through the command line.

`Hauler` does this by storing contents and collections as OCI Artifacts and allows operators to serve contents and collections with an embedded registry and fileserver. Additionally, `Hauler` has the ability to store and inspect various non-image OCI Artifacts.

For more information, please review the **[Hauler Documentation](https://hauler.dev)!**
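
The end-to-end flow this enables (fetch content into a local store, package it as a haul, then load and serve it on the far side of the airgap) looks roughly like the sketch below; the commands and flags are the ones exercised by the test workflow earlier in this diff, and the file names are placeholders.

    # connected side: sync declared content (images, charts, files) into the local store
    hauler store sync --filename hauler-manifest.yaml

    # package the store as a portable archive (a haul)
    hauler store save --filename store.tar.zst

    # airgapped side: load the haul back into a local store
    hauler store load --filename store.tar.zst

    # serve the store as an OCI registry (or use "serve fileserver" for plain files)
    hauler store serve registry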

## Recent Changes

### In Hauler v1.2.0...

- Upgraded the `apiVersion` to `v1` from `v1alpha1`
  - Users are able to use `v1` and `v1alpha1`, but `v1alpha1` is now deprecated and will be removed in a future release. We will update the community when we fully deprecate and remove the functionality of `v1alpha1`
  - Users will see logging notices when using the old `apiVersion` such as...
    - `!!! DEPRECATION WARNING !!! apiVersion [v1alpha1] will be removed in a future release !!! DEPRECATION WARNING !!!`
---
- Updated the behavior of `hauler store load` to default to loading a `haul` with the name of `haul.tar.zst` and requires the flag of `--filename/-f` to load a `haul` with a different name
  - Users can load multiple `hauls` by specifying multiple flags of `--filename/-f`
  - updated command usage: `hauler store load --filename hauling-hauls.tar.zst`
  - previous command usage (do not use): `hauler store load hauling-hauls.tar.zst`
---
- Updated the behavior of `hauler store sync` to default to syncing a `manifest` with the name of `hauler-manifest.yaml` and requires the flag of `--filename/-f` to sync a `manifest` with a different name
  - Users can sync multiple `manifests` by specifying multiple flags of `--filename/-f`
  - updated command usage: `hauler store sync --filename hauling-hauls-manifest.yaml`
  - previous command usage (do not use): `hauler store sync --files hauling-hauls-manifest.yaml`
---
Please review the documentation for any additional [Known Limits, Issues, and Notices](https://docs.hauler.dev/docs/known-limits)!

## Installation

### Linux/Darwin

@@ -1,6 +0,0 @@
//go:build boringcrypto
// +build boringcrypto

package main

import _ "crypto/tls/fipsonly"

@@ -1,25 +1,25 @@
package cli

import (
    "context"

    cranecmd "github.com/google/go-containerregistry/cmd/crane/cmd"
    "github.com/spf13/cobra"
    "hauler.dev/go/hauler/internal/flags"
    "hauler.dev/go/hauler/pkg/consts"
    "hauler.dev/go/hauler/pkg/log"

    "github.com/rancherfederal/hauler/pkg/log"
)

func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
    cmd := &cobra.Command{
        Use: "hauler",
        Short: "Airgap Swiss Army Knife",
        Example: " View the Docs: https://docs.hauler.dev\n Environment Variables: " + consts.HaulerDir + " | " + consts.HaulerTempDir + " | " + consts.HaulerStoreDir + " | " + consts.HaulerIgnoreErrors,
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            l := log.FromContext(ctx)
            l.SetLevel(ro.LogLevel)
            l.Debugf("running cli command [%s]", cmd.CommandPath())
type rootOpts struct {
    logLevel string
}

var ro = &rootOpts{}

func New() *cobra.Command {
    cmd := &cobra.Command{
        Use: "hauler",
        Short: "Airgap Swiss Army Knife",
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            l := log.FromContext(cmd.Context())
            l.SetLevel(ro.logLevel)
            l.Debugf("running cli command [%s]", cmd.CommandPath())
            return nil
        },
        RunE: func(cmd *cobra.Command, args []string) error {
@@ -27,12 +27,14 @@ func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
        },
    }

    flags.AddRootFlags(cmd, ro)
    pf := cmd.PersistentFlags()
    pf.StringVarP(&ro.logLevel, "log-level", "l", "info", "")

    cmd.AddCommand(cranecmd.NewCmdAuthLogin("hauler"))
    addStore(cmd, ro)
    addVersion(cmd, ro)
    addCompletion(cmd, ro)
    // Add subcommands
    addLogin(cmd)
    addStore(cmd)
    addVersion(cmd)
    addCompletion(cmd)

    return cmd
}

@@ -5,30 +5,35 @@ import (
    "os"

    "github.com/spf13/cobra"

    "hauler.dev/go/hauler/internal/flags"
)

func addCompletion(parent *cobra.Command, ro *flags.CliRootOpts) {
func addCompletion(parent *cobra.Command) {
    cmd := &cobra.Command{
        Use: "completion",
        Short: "Generate auto-completion scripts for various shells",
        Short: "Generates completion scripts for various shells",
        Long: `The completion sub-command generates completion scripts for various shells.`,
    }

    cmd.AddCommand(
        addCompletionZsh(ro),
        addCompletionBash(ro),
        addCompletionFish(ro),
        addCompletionPowershell(ro),
        addCompletionZsh(),
        addCompletionBash(),
        addCompletionFish(),
        addCompletionPowershell(),
    )

    parent.AddCommand(cmd)
}

func addCompletionZsh(ro *flags.CliRootOpts) *cobra.Command {
func completionError(err error) ([]string, cobra.ShellCompDirective) {
    cobra.CompError(err.Error())
    return nil, cobra.ShellCompDirectiveError
}

func addCompletionZsh() *cobra.Command {
    cmd := &cobra.Command{
        Use: "zsh",
        Short: "Generates auto-completion scripts for zsh",
        Short: "Generates zsh completion scripts",
        Long: `The completion sub-command generates completion scripts for zsh.`,
        Example: `To load completion run

. <(hauler completion zsh)
@@ -46,7 +51,7 @@ func addCompletionZsh(ro *flags.CliRootOpts) *cobra.Command {
mv _hauler ~/.oh-my-zsh/completions # oh-my-zsh
mv _hauler ~/.zprezto/modules/completion/external/src/ # zprezto`,
        Run: func(cmd *cobra.Command, args []string) {
            cmd.Root().GenZshCompletion(os.Stdout)
            cmd.GenZshCompletion(os.Stdout)
            // Cobra doesn't source zsh completion file, explicitly doing it here
            fmt.Println("compdef _hauler hauler")
        },
@@ -54,10 +59,11 @@ func addCompletionZsh(ro *flags.CliRootOpts) *cobra.Command {
    return cmd
}

func addCompletionBash(ro *flags.CliRootOpts) *cobra.Command {
func addCompletionBash() *cobra.Command {
    cmd := &cobra.Command{
        Use: "bash",
        Short: "Generates auto-completion scripts for bash",
        Short: "Generates bash completion scripts",
        Long: `The completion sub-command generates completion scripts for bash.`,
        Example: `To load completion run

. <(hauler completion bash)
@@ -67,32 +73,34 @@ func addCompletionBash(ro *flags.CliRootOpts) *cobra.Command {
# ~/.bashrc or ~/.profile
command -v hauler >/dev/null && . <(hauler completion bash)`,
        Run: func(cmd *cobra.Command, args []string) {
            cmd.Root().GenBashCompletion(os.Stdout)
            cmd.GenBashCompletion(os.Stdout)
        },
    }
    return cmd
}

func addCompletionFish(ro *flags.CliRootOpts) *cobra.Command {
func addCompletionFish() *cobra.Command {
    cmd := &cobra.Command{
        Use: "fish",
        Short: "Generates auto-completion scripts for fish",
        Short: "Generates fish completion scripts",
        Long: `The completion sub-command generates completion scripts for fish.`,
        Example: `To configure your fish shell to load completions for each session write this script to your completions dir:

hauler completion fish > ~/.config/fish/completions/hauler.fish

See http://fishshell.com/docs/current/index.html#completion-own for more details`,
        Run: func(cmd *cobra.Command, args []string) {
            cmd.Root().GenFishCompletion(os.Stdout, true)
            cmd.GenFishCompletion(os.Stdout, true)
        },
    }
    return cmd
}

func addCompletionPowershell(ro *flags.CliRootOpts) *cobra.Command {
func addCompletionPowershell() *cobra.Command {
    cmd := &cobra.Command{
        Use: "powershell",
        Short: "Generates auto-completion scripts for powershell",
        Short: "Generates powershell completion scripts",
        Long: `The completion sub-command generates completion scripts for powershell.`,
        Example: `To load completion run

. <(hauler completion powershell)
@@ -109,7 +117,7 @@ func addCompletionPowershell(ro *flags.CliRootOpts) *cobra.Command {
cd "${XDG_CONFIG_HOME:-"$HOME/.config/"}/powershell/modules"
hauler completion powershell >> hauler-completions.ps1`,
        Run: func(cmd *cobra.Command, args []string) {
            cmd.Root().GenPowerShellCompletion(os.Stdout)
            cmd.GenPowerShellCompletion(os.Stdout)
        },
    }
    return cmd

cmd/hauler/cli/login.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package cli

import (
    "context"
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/spf13/cobra"
    "oras.land/oras-go/pkg/content"

    "github.com/rancherfederal/hauler/pkg/cosign"
)

type Opts struct {
    Username string
    Password string
    PasswordStdin bool
}

func (o *Opts) AddArgs(cmd *cobra.Command) {
    f := cmd.Flags()
    f.StringVarP(&o.Username, "username", "u", "", "Username")
    f.StringVarP(&o.Password, "password", "p", "", "Password")
    f.BoolVarP(&o.PasswordStdin, "password-stdin", "", false, "Take the password from stdin")
}

func addLogin(parent *cobra.Command) {
    o := &Opts{}

    cmd := &cobra.Command{
        Use: "login",
        Short: "Log in to a registry",
        Example: `
# Log in to reg.example.com
hauler login reg.example.com -u bob -p haulin`,
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, arg []string) error {
            ctx := cmd.Context()

            if o.PasswordStdin {
                contents, err := io.ReadAll(os.Stdin)
                if err != nil {
                    return err
                }
                o.Password = strings.TrimSuffix(string(contents), "\n")
                o.Password = strings.TrimSuffix(o.Password, "\r")
            }

            if o.Username == "" && o.Password == "" {
                return fmt.Errorf("username and password required")
            }

            return login(ctx, o, arg[0])
        },
    }
    o.AddArgs(cmd)

    parent.AddCommand(cmd)
}

func login(ctx context.Context, o *Opts, registry string) error {
    ropts := content.RegistryOptions{
        Username: o.Username,
        Password: o.Password,
    }

    err := cosign.RegistryLogin(ctx, nil, registry, ropts)
    if err != nil {
        return err
    }

    return nil
}

@@ -6,43 +6,44 @@ import (
    "github.com/spf13/cobra"
    "helm.sh/helm/v3/pkg/action"

    "hauler.dev/go/hauler/cmd/hauler/cli/store"
    "hauler.dev/go/hauler/internal/flags"
    "github.com/rancherfederal/hauler/cmd/hauler/cli/store"
)

func addStore(parent *cobra.Command, ro *flags.CliRootOpts) {
    rso := &flags.StoreRootOpts{}
var rootStoreOpts = &store.RootOpts{}

func addStore(parent *cobra.Command) {
    cmd := &cobra.Command{
        Use: "store",
        Aliases: []string{"s"},
        Short: "Interact with the content store",
        Short: "Interact with hauler's embedded content store",
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }
    rso.AddFlags(cmd)
    rootStoreOpts.AddArgs(cmd)

    cmd.AddCommand(
        addStoreSync(rso, ro),
        addStoreExtract(rso, ro),
        addStoreLoad(rso, ro),
        addStoreSave(rso, ro),
        addStoreServe(rso, ro),
        addStoreInfo(rso, ro),
        addStoreCopy(rso, ro),
        addStoreAdd(rso, ro),
        addStoreSync(),
        addStoreExtract(),
        addStoreLoad(),
        addStoreSave(),
        addStoreServe(),
        addStoreInfo(),
        addStoreCopy(),

        // TODO: Remove this in favor of sync?
        addStoreAdd(),
    )

    parent.AddCommand(cmd)
}

func addStoreExtract(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.ExtractOpts{StoreRootOpts: rso}
func addStoreExtract() *cobra.Command {
    o := &store.ExtractOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "extract",
        Short: "Extract artifacts from the content store to disk",
        Short: "Extract content from the store to disk",
        Aliases: []string{"x"},
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
@@ -56,28 +57,17 @@ func addStoreExtract(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Com
            return store.ExtractCmd(ctx, o, s, args[0])
        },
    }
    o.AddFlags(cmd)
    o.AddArgs(cmd)

    return cmd
}

func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.SyncOpts{StoreRootOpts: rso}
func addStoreSync() *cobra.Command {
    o := &store.SyncOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "sync",
        Short: "Sync content to the content store",
        Args: cobra.ExactArgs(0),
        PreRunE: func(cmd *cobra.Command, args []string) error {
            // Check if the products flag was passed
            if len(o.Products) > 0 {
                // Only clear the default if the user did NOT explicitly set --filename
                if !cmd.Flags().Changed("filename") {
                    o.FileName = []string{}
                }
            }
            return nil
        },
        Short: "Sync content to the embedded content store",
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -86,7 +76,7 @@ func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
                return err
            }

            return store.SyncCmd(ctx, o, s, rso, ro)
            return store.SyncCmd(ctx, o, s)
        },
    }
    o.AddFlags(cmd)
@@ -94,13 +84,13 @@ func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
    return cmd
}

func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.LoadOpts{StoreRootOpts: rso}
func addStoreLoad() *cobra.Command {
    o := &store.LoadOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "load",
        Short: "Load a content store from a store archive",
        Args: cobra.ExactArgs(0),
        Args: cobra.MinimumNArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -110,7 +100,7 @@ func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
            }
            _ = s

            return store.LoadCmd(ctx, o, rso, ro)
            return store.LoadCmd(ctx, o, args...)
        },
    }
    o.AddFlags(cmd)
@@ -118,28 +108,28 @@ func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
    return cmd
}

func addStoreServe(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
func addStoreServe() *cobra.Command {
    cmd := &cobra.Command{
        Use: "serve",
        Short: "Serve the content store via an OCI Compliant Registry or Fileserver",
        Short: "Expose the content of a local store through an OCI compliant registry or file server",
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }
    cmd.AddCommand(
        addStoreServeRegistry(rso, ro),
        addStoreServeFiles(rso, ro),
        addStoreServeRegistry(),
        addStoreServeFiles(),
    )

    return cmd
}

func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.ServeRegistryOpts{StoreRootOpts: rso}

// RegistryCmd serves the embedded registry
func addStoreServeRegistry() *cobra.Command {
    o := &store.ServeRegistryOpts{RootOpts: rootStoreOpts}
    cmd := &cobra.Command{
        Use: "registry",
        Short: "Serve the OCI Compliant Registry",
        Short: "Serve the embedded registry",
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -148,7 +138,7 @@ func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cob
                return err
            }

            return store.ServeRegistryCmd(ctx, o, s, rso, ro)
            return store.ServeRegistryCmd(ctx, o, s)
        },
    }

@@ -157,12 +147,12 @@ func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cob
    return cmd
}

func addStoreServeFiles(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.ServeFilesOpts{StoreRootOpts: rso}

// FileServerCmd serves the file server
func addStoreServeFiles() *cobra.Command {
    o := &store.ServeFilesOpts{RootOpts: rootStoreOpts}
    cmd := &cobra.Command{
        Use: "fileserver",
        Short: "Serve the Fileserver",
        Short: "Serve the file server",
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -171,7 +161,7 @@ func addStoreServeFiles(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.
                return err
            }

            return store.ServeFilesCmd(ctx, o, s, ro)
            return store.ServeFilesCmd(ctx, o, s)
        },
    }

@@ -180,8 +170,8 @@ func addStoreServeFiles(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.
    return cmd
}

func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.SaveOpts{StoreRootOpts: rso}
func addStoreSave() *cobra.Command {
    o := &store.SaveOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "save",
@@ -196,16 +186,16 @@ func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
            }
            _ = s

            return store.SaveCmd(ctx, o, rso, ro)
            return store.SaveCmd(ctx, o, o.FileName)
        },
    }
    o.AddFlags(cmd)
    o.AddArgs(cmd)

    return cmd
}

func addStoreInfo(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.InfoOpts{StoreRootOpts: rso}
func addStoreInfo() *cobra.Command {
    o := &store.InfoOpts{RootOpts: rootStoreOpts}

    var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "all"}

@@ -235,12 +225,12 @@ func addStoreInfo(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
    return cmd
}

func addStoreCopy(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.CopyOpts{StoreRootOpts: rso}
func addStoreCopy() *cobra.Command {
    o := &store.CopyOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "copy",
        Short: "Copy all store content to another location",
        Short: "Copy all store contents to another OCI registry",
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()
@@ -250,7 +240,7 @@ func addStoreCopy(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
                return err
            }

            return store.CopyCmd(ctx, o, s, args[0], ro)
            return store.CopyCmd(ctx, o, s, args[0])
        },
    }
    o.AddFlags(cmd)
@@ -258,39 +248,31 @@ func addStoreCopy(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
    return cmd
}

func addStoreAdd(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
func addStoreAdd() *cobra.Command {
    cmd := &cobra.Command{
        Use: "add",
        Short: "Add content to the store",
        Short: "Add content to store",
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }

    cmd.AddCommand(
        addStoreAddFile(rso, ro),
        addStoreAddImage(rso, ro),
        addStoreAddChart(rso, ro),
        addStoreAddFile(),
        addStoreAddImage(),
        addStoreAddChart(),
    )

    return cmd
}

func addStoreAddFile(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.AddFileOpts{StoreRootOpts: rso}
func addStoreAddFile() *cobra.Command {
    o := &store.AddFileOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "file",
        Short: "Add a file to the store",
        Example: `# fetch local file
hauler store add file file.txt

# fetch remote file
hauler store add file https://get.rke2.io/install.sh

# fetch remote file and assign new name
hauler store add file https://get.hauler.dev --name hauler-install.sh`,
        Args: cobra.ExactArgs(1),
        Short: "Add a file to the content store",
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -307,28 +289,13 @@ hauler store add file https://get.hauler.dev --name hauler-install.sh`,
    return cmd
}

func addStoreAddImage(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.AddImageOpts{StoreRootOpts: rso}
func addStoreAddImage() *cobra.Command {
    o := &store.AddImageOpts{RootOpts: rootStoreOpts}

    cmd := &cobra.Command{
        Use: "image",
        Short: "Add a image to the store",
        Example: `# fetch image
hauler store add image busybox

# fetch image with repository and tag
hauler store add image library/busybox:stable

# fetch image with full image reference and specific platform
hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.2.0 --platform linux/amd64

# fetch image with full image reference via digest
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5

# fetch image with full image reference, specific platform, and signature verification
curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub
hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub`,
        Args: cobra.ExactArgs(1),
        Short: "Add an image to the content store",
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -337,7 +304,7 @@ hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 -
                return err
            }

            return store.AddImageCmd(ctx, o, s, args[0], rso, ro)
            return store.AddImageCmd(ctx, o, s, args[0])
        },
    }
    o.AddFlags(cmd)
@@ -345,29 +312,28 @@ hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 -
    return cmd
}

func addStoreAddChart(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
    o := &flags.AddChartOpts{StoreRootOpts: rso, ChartOpts: &action.ChartPathOptions{}}
func addStoreAddChart() *cobra.Command {
    o := &store.AddChartOpts{
        RootOpts: rootStoreOpts,
        ChartOpts: &action.ChartPathOptions{},
    }

    cmd := &cobra.Command{
        Use: "chart",
        Short: "Add a helm chart to the store",
        Example: `# fetch local helm chart
hauler store add chart path/to/chart/directory --repo .
        Short: "Add a local or remote chart to the content store",
        Example: `
# add a local chart
hauler store add chart path/to/chart/directory

# fetch local compressed helm chart
hauler store add chart path/to/chart.tar.gz --repo .
# add a local compressed chart
hauler store add chart path/to/chart.tar.gz

# fetch remote oci helm chart
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev
# add a remote chart
hauler store add chart longhorn --repo "https://charts.longhorn.io"

# fetch remote oci helm chart with version
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2.0

# fetch remote helm chart
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable

# fetch remote helm chart with specific version
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1`,
# add a specific version of a chart
hauler store add chart rancher --repo "https://releases.rancher.com/server-charts/latest" --version "2.6.2"
`,
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx := cmd.Context()

@@ -2,25 +2,33 @@ package store

import (
    "context"
    "os"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
    "github.com/spf13/cobra"
    "helm.sh/helm/v3/pkg/action"

    "hauler.dev/go/hauler/internal/flags"
    v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
    "hauler.dev/go/hauler/pkg/artifacts/file"
    "hauler.dev/go/hauler/pkg/consts"
    "hauler.dev/go/hauler/pkg/content/chart"
    "hauler.dev/go/hauler/pkg/cosign"
    "hauler.dev/go/hauler/pkg/getter"
    "hauler.dev/go/hauler/pkg/log"
    "hauler.dev/go/hauler/pkg/reference"
    "hauler.dev/go/hauler/pkg/store"
    "github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
    "github.com/rancherfederal/hauler/pkg/artifacts/file"
    "github.com/rancherfederal/hauler/pkg/content/chart"
    "github.com/rancherfederal/hauler/pkg/cosign"
    "github.com/rancherfederal/hauler/pkg/log"
    "github.com/rancherfederal/hauler/pkg/reference"
    "github.com/rancherfederal/hauler/pkg/store"
)

func AddFileCmd(ctx context.Context, o *flags.AddFileOpts, s *store.Layout, reference string) error {
    cfg := v1.File{
type AddFileOpts struct {
    *RootOpts
    Name string
}

func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
    f := cmd.Flags()
    f.StringVarP(&o.Name, "name", "n", "", "(Optional) Name to assign to file in store")
}

func AddFileCmd(ctx context.Context, o *AddFileOpts, s *store.Layout, reference string) error {
    cfg := v1alpha1.File{
        Path: reference,
    }
    if len(o.Name) > 0 {
@@ -29,7 +37,7 @@ func AddFileCmd(ctx context.Context, o *flags.AddFileOpts, s *store.Layout, refe
    return storeFile(ctx, s, cfg)
}

func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error {
func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
    l := log.FromContext(ctx)

    copts := getter.ClientOptions{
@@ -37,90 +45,97 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error {
    }

    f := file.NewFile(fi.Path, file.WithClient(getter.NewClient(copts)))
    ref, err := reference.NewTagged(f.Name(fi.Path), consts.DefaultTag)
    ref, err := reference.NewTagged(f.Name(fi.Path), reference.DefaultTag)
    if err != nil {
        return err
    }

    l.Infof("adding file [%s] to the store as [%s]", fi.Path, ref.Name())
    l.Infof("adding 'file' [%s] to the store as [%s]", fi.Path, ref.Name())
    _, err = s.AddOCI(ctx, f, ref.Name())
    if err != nil {
        return err
    }

    l.Infof("successfully added file [%s]", ref.Name())
    l.Infof("successfully added 'file' [%s]", ref.Name())

    return nil
}

func AddImageCmd(ctx context.Context, o *flags.AddImageOpts, s *store.Layout, reference string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
    l := log.FromContext(ctx)
type AddImageOpts struct {
    *RootOpts
    Name string
    Key string
    Platform string
}

    cfg := v1.Image{
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
    f := cmd.Flags()
    f.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for digital signature verification")
    f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specific platform to save. i.e. linux/amd64. Defaults to all if flag is omitted.")
}

func AddImageCmd(ctx context.Context, o *AddImageOpts, s *store.Layout, reference string) error {
    l := log.FromContext(ctx)
    cfg := v1alpha1.Image{
        Name: reference,
    }

    // Check if the user provided a key.
    if o.Key != "" {
        // verify signature using the provided key.
        err := cosign.VerifySignature(ctx, s, o.Key, o.Tlog, cfg.Name, rso, ro)
        err := cosign.VerifySignature(ctx, s, o.Key, cfg.Name)
        if err != nil {
            return err
        }
        l.Infof("signature verified for image [%s]", cfg.Name)
    } else if o.CertIdentityRegexp != "" || o.CertIdentity != "" {
        // verify signature using the provided keyless details
        l.Infof("verifying keyless signature for [%s]", cfg.Name)
        err := cosign.VerifyKeylessSignature(ctx, s, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, o.Tlog, cfg.Name, rso, ro)
        if err != nil {
            return err
        }
        l.Infof("keyless signature verified for image [%s]", cfg.Name)
    }

    return storeImage(ctx, s, cfg, o.Platform, rso, ro)
    return storeImage(ctx, s, cfg, o.Platform)
}

func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image, platform string) error {
    l := log.FromContext(ctx)

    if !ro.IgnoreErrors {
        envVar := os.Getenv(consts.HaulerIgnoreErrors)
        if envVar == "true" {
            ro.IgnoreErrors = true
        }
    }

    l.Infof("adding image [%s] to the store", i.Name)
    l.Infof("adding 'image' [%s] to the store", i.Name)

    r, err := name.ParseReference(i.Name)
    if err != nil {
        if ro.IgnoreErrors {
            l.Warnf("unable to parse image [%s]: %v... skipping...", i.Name, err)
            return nil
        } else {
            l.Errorf("unable to parse image [%s]: %v", i.Name, err)
            return err
        }
        l.Warnf("unable to parse 'image' [%s], skipping...", r.Name())
        return nil
    }

    err = cosign.SaveImage(ctx, s, r.Name(), platform, rso, ro)
    err = cosign.SaveImage(ctx, s, r.Name(), platform)
    if err != nil {
        if ro.IgnoreErrors {
            l.Warnf("unable to add image [%s] to store: %v... skipping...", r.Name(), err)
            return nil
        } else {
            l.Errorf("unable to add image [%s] to store: %v", r.Name(), err)
            return err
        }
        l.Warnf("unable to add 'image' [%s] to store. skipping...", r.Name())
        return nil
    }

    l.Infof("successfully added image [%s]", r.Name())
    l.Infof("successfully added 'image' [%s]", r.Name())
    return nil
}

func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, chartName string) error {
    cfg := v1.Chart{
type AddChartOpts struct {
    *RootOpts

    ChartOpts *action.ChartPathOptions
|
||||
}
|
||||
|
||||
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVar(&o.ChartOpts.RepoURL, "repo", "", "chart repository url where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
|
||||
f.BoolVar(&o.ChartOpts.Verify, "verify", false, "verify the package before using it")
|
||||
f.StringVar(&o.ChartOpts.Username, "username", "", "chart repository username where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.Password, "password", "", "chart repository password where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
|
||||
f.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
|
||||
f.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
|
||||
}
|
||||
|
||||
func AddChartCmd(ctx context.Context, o *AddChartOpts, s *store.Layout, chartName string) error {
|
||||
// TODO: Reduce duplicates between api chart and upstream helm opts
|
||||
cfg := v1alpha1.Chart{
|
||||
Name: chartName,
|
||||
RepoURL: o.ChartOpts.RepoURL,
|
||||
Version: o.ChartOpts.Version,
|
||||
@@ -129,10 +144,9 @@ func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, ch
|
||||
return storeChart(ctx, s, cfg, o.ChartOpts)
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *action.ChartPathOptions) error {
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *action.ChartPathOptions) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
l.Infof("adding chart [%s] to the store", cfg.Name)
|
||||
l.Infof("adding 'chart' [%s] to the store", cfg.Name)
|
||||
|
||||
// TODO: This shouldn't be necessary
|
||||
opts.RepoURL = cfg.RepoURL
|
||||
@@ -157,6 +171,6 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *action
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("successfully added chart [%s]", ref.Name())
|
||||
l.Infof("successfully added 'chart' [%s]", ref.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
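Not part of the commit above — a minimal, stdlib-only sketch of the precedence that AddImageCmd applies when deciding how to verify an image: a key file selects key-based verification, any certificate identity option selects keyless verification, and otherwise no verification runs. The helper name is hypothetical.

package main

import "fmt"

// decideVerification mirrors the branching shown in AddImageCmd.
func decideVerification(key, certIdentity, certIdentityRegexp string) string {
	switch {
	case key != "":
		return "key"
	case certIdentity != "" || certIdentityRegexp != "":
		return "keyless"
	default:
		return "none"
	}
}

func main() {
	fmt.Println(decideVerification("cosign.pub", "", ""))            // key
	fmt.Println(decideVerification("", "", "https://github.com/.*")) // keyless
	fmt.Println(decideVerification("", "", ""))                      // none
}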
@@ -5,21 +5,39 @@ import (
"fmt"
"strings"

"github.com/spf13/cobra"
"oras.land/oras-go/pkg/content"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/pkg/cosign"
"hauler.dev/go/hauler/pkg/log"
"hauler.dev/go/hauler/pkg/store"
"github.com/rancherfederal/hauler/pkg/cosign"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)

func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef string, ro *flags.CliRootOpts) error {
type CopyOpts struct {
*RootOpts

Username string
Password string
Insecure bool
PlainHTTP bool
}

func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
f := cmd.Flags()

f.StringVarP(&o.Username, "username", "u", "", "Username when copying to an authenticated remote registry")
f.StringVarP(&o.Password, "password", "p", "", "Password when copying to an authenticated remote registry")
f.BoolVar(&o.Insecure, "insecure", false, "Toggle allowing insecure connections when copying to a remote registry")
f.BoolVar(&o.PlainHTTP, "plain-http", false, "Toggle allowing plain http connections when copying to a remote registry")
}

func CopyCmd(ctx context.Context, o *CopyOpts, s *store.Layout, targetRef string) error {
l := log.FromContext(ctx)

components := strings.SplitN(targetRef, "://", 2)
switch components[0] {
case "dir":
l.Debugf("identified directory target reference of [%s]", components[1])
l.Debugf("identified directory target reference")
fs := content.NewFile(components[1])
defer fs.Close()

@@ -29,7 +47,7 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef
}

case "registry":
l.Debugf("identified registry target reference of [%s]", components[1])
l.Debugf("identified registry target reference")
ropts := content.RegistryOptions{
Username: o.Username,
Password: o.Password,
@@ -37,7 +55,14 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef
PlainHTTP: o.PlainHTTP,
}

err := cosign.LoadImages(ctx, s, components[1], o.Only, ropts, ro)
if ropts.Username != "" {
err := cosign.RegistryLogin(ctx, s, components[1], ropts)
if err != nil {
return err
}
}

err := cosign.LoadImages(ctx, s, components[1], ropts)
if err != nil {
return err
}
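Not from this commit — a small stdlib-only sketch of how a dir:// or registry:// target reference splits into a scheme and a path, matching the strings.SplitN call in CopyCmd:

package main

import (
	"fmt"
	"strings"
)

// splitTarget separates the copy scheme from the rest of the target reference.
func splitTarget(targetRef string) (scheme, rest string) {
	components := strings.SplitN(targetRef, "://", 2)
	if len(components) != 2 {
		return "", targetRef // no scheme present
	}
	return components[0], components[1]
}

func main() {
	fmt.Println(splitTarget("registry://localhost:5000")) // registry localhost:5000
	fmt.Println(splitTarget("dir://fileserver"))           // dir fileserver
}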
@@ -7,15 +7,26 @@ import (
"strings"

ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/internal/mapper"
"hauler.dev/go/hauler/pkg/log"
"hauler.dev/go/hauler/pkg/reference"
"hauler.dev/go/hauler/pkg/store"
"github.com/rancherfederal/hauler/internal/mapper"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)

func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref string) error {
type ExtractOpts struct {
*RootOpts
DestinationDir string
}

func (o *ExtractOpts) AddArgs(cmd *cobra.Command) {
f := cmd.Flags()

f.StringVarP(&o.DestinationDir, "output", "o", "", "Directory to save contents to (defaults to current directory)")
}

func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Layout, ref string) error {
l := log.FromContext(ctx)

r, err := reference.Parse(ref)
@@ -23,12 +34,10 @@ func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref
return err
}

// use the repository from the context and the identifier from the reference
repo := r.Context().RepositoryStr() + ":" + r.Identifier()

found := false
if err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
if !strings.Contains(reference, repo) {

if !strings.Contains(reference, r.Name()) {
return nil
}
found = true
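Not from this commit — a stdlib-only sketch of the substring check ExtractCmd performs while walking the store to find the requested content:

package main

import (
	"fmt"
	"strings"
)

// matchesRef reports whether a stored reference contains the requested
// repository:identifier pair, the same strings.Contains check used above.
func matchesRef(stored, repo string) bool {
	return strings.Contains(stored, repo)
}

func main() {
	fmt.Println(matchesRef("hauler/rancher-manifest.yaml:v2.8.5", "rancher-manifest.yaml:v2.8.5")) // true
	fmt.Println(matchesRef("hauler/rancher-manifest.yaml:v2.8.5", "rke2-manifest.yaml"))           // false
}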
54
cmd/hauler/cli/store/flags.go
Normal file
@@ -0,0 +1,54 @@
package store

import (
"context"
"errors"
"os"
"path/filepath"

"github.com/spf13/cobra"

"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)

const (
DefaultStoreName = "store"
)

type RootOpts struct {
StoreDir string
CacheDir string
}

func (o *RootOpts) AddArgs(cmd *cobra.Command) {
pf := cmd.PersistentFlags()
pf.StringVarP(&o.StoreDir, "store", "s", DefaultStoreName, "Location to create store at")
pf.StringVar(&o.CacheDir, "cache", "", "(deprecated flag and currently not used)")
}

func (o *RootOpts) Store(ctx context.Context) (*store.Layout, error) {
l := log.FromContext(ctx)
dir := o.StoreDir

abs, err := filepath.Abs(dir)
if err != nil {
return nil, err
}

l.Debugf("using store at %s", abs)
if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
err := os.Mkdir(abs, os.ModePerm)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}

s, err := store.NewLayout(abs)
if err != nil {
return nil, err
}
return s, nil
}
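Not from this commit — a stdlib-only sketch of the directory handling RootOpts.Store performs: resolve the store path to an absolute directory and create it only if it does not already exist.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// ensureDir resolves dir to an absolute path and creates it when missing.
func ensureDir(dir string) (string, error) {
	abs, err := filepath.Abs(dir)
	if err != nil {
		return "", err
	}
	if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
		if err := os.Mkdir(abs, os.ModePerm); err != nil {
			return "", err
		}
	} else if err != nil {
		return "", err
	}
	return abs, nil
}

func main() {
	fmt.Println(ensureDir("store"))
}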
@@ -9,14 +9,31 @@ import (

"github.com/olekukonko/tablewriter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/pkg/consts"
"hauler.dev/go/hauler/pkg/reference"
"hauler.dev/go/hauler/pkg/store"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)

func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
type InfoOpts struct {
*RootOpts

OutputFormat string
TypeFilter string
SizeUnit string
}

func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
f := cmd.Flags()

f.StringVarP(&o.OutputFormat, "output", "o", "table", "Output format (table, json)")
f.StringVarP(&o.TypeFilter, "type", "t", "all", "Filter on type (image, chart, file, sigs, atts, sbom)")

// TODO: Regex/globbing
}

func InfoCmd(ctx context.Context, o *InfoOpts, s *store.Layout) error {
var items []item
if err := s.Walk(func(ref string, desc ocispec.Descriptor) error {
if _, ok := desc.Annotations[ocispec.AnnotationRefName]; !ok {
@@ -104,11 +121,6 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
return err
}

if o.ListRepos {
buildListRepos(items...)
return nil
}

// sort items by ref and arch
sort.Sort(byReferenceAndArch(items))

@@ -123,30 +135,6 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
return nil
}

func buildListRepos(items ...item) {
// Create map to track unique repository names
repos := make(map[string]bool)

for _, i := range items {
repoName := ""
for j := 0; j < len(i.Reference); j++ {
if i.Reference[j] == '/' {
repoName = i.Reference[:j]
break
}
}
if repoName == "" {
repoName = i.Reference
}
repos[repoName] = true
}

// Collect and print unique repository names
for repoName := range repos {
fmt.Println(repoName)
}
}

func buildTable(items ...item) {
// Create a table for the results
table := tablewriter.NewWriter(os.Stdout)
@@ -210,7 +198,7 @@ func (a byReferenceAndArch) Less(i, j int) bool {
return a[i].Reference < a[j].Reference
}

func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *InfoOpts) item {
var size int64 = 0
for _, l := range m.Layers {
size += l.Size
@@ -238,11 +226,7 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat
ctype = "sbom"
}

refName := desc.Annotations["io.containerd.image.name"]
if refName == "" {
refName = desc.Annotations[ocispec.AnnotationRefName]
}
ref, err := reference.Parse(refName)
ref, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
if err != nil {
return item{}
}
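Not from this commit — a stdlib-only sketch of the repository-prefix extraction that buildListRepos performs with a manual index loop, written here with strings.Cut for clarity:

package main

import (
	"fmt"
	"strings"
)

// repoOf returns the text before the first '/', falling back to the whole reference.
func repoOf(ref string) string {
	if prefix, _, found := strings.Cut(ref, "/"); found {
		return prefix
	}
	return ref
}

func main() {
	fmt.Println(repoOf("index.docker.io/library/busybox:latest")) // index.docker.io
	fmt.Println(repoOf("busybox:latest"))                          // busybox:latest
}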
@@ -2,42 +2,39 @@ package store

import (
"context"
"io"
"net/url"
"os"
"path/filepath"
"strings"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/pkg/archives"
"hauler.dev/go/hauler/pkg/consts"
"hauler.dev/go/hauler/pkg/content"
"hauler.dev/go/hauler/pkg/getter"
"hauler.dev/go/hauler/pkg/log"
"hauler.dev/go/hauler/pkg/store"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"

"github.com/rancherfederal/hauler/pkg/content"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)

// extracts the contents of an archived oci layout to an existing oci layout
func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
type LoadOpts struct {
*RootOpts
TempOverride string
}

func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
f := cmd.Flags()

// On Unix systems, the default is $TMPDIR if non-empty, else /tmp.
// On Windows, the default is GetTempPath, returning the first non-empty
// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
// On Plan 9, the default is /tmp.
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "overrides the default directory for temporary files, as returned by your OS.")
}

// LoadCmd
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func LoadCmd(ctx context.Context, o *LoadOpts, archiveRefs ...string) error {
l := log.FromContext(ctx)

tempOverride := o.TempOverride

if tempOverride == "" {
tempOverride = os.Getenv(consts.HaulerTempDir)
}

tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
if err != nil {
return err
}
defer os.RemoveAll(tempDir)

l.Debugf("using temporary directory at [%s]", tempDir)

for _, fileName := range o.FileName {
l.Infof("loading haul [%s] to [%s]", fileName, o.StoreDir)
err := unarchiveLayoutTo(ctx, fileName, o.StoreDir, tempDir)
for _, archiveRef := range archiveRefs {
l.Infof("loading content from [%s] to [%s]", archiveRef, o.StoreDir)
err := unarchiveLayoutTo(ctx, archiveRef, o.StoreDir, o.TempOverride)
if err != nil {
return err
}
@@ -46,46 +43,19 @@ func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, r
return nil
}

// accepts an archived OCI layout, extracts the contents to an existing OCI layout, and preserves the index
func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDir string) error {
l := log.FromContext(ctx)

if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
l.Debugf("detected remote archive... starting download... [%s]", haulPath)

h := getter.NewHttp()
parsedURL, err := url.Parse(haulPath)
if err != nil {
return err
}
rc, err := h.Open(ctx, parsedURL)
if err != nil {
return err
}
defer rc.Close()

fileName := h.Name(parsedURL)
if fileName == "" {
fileName = filepath.Base(parsedURL.Path)
}
haulPath = filepath.Join(tempDir, fileName)

out, err := os.Create(haulPath)
if err != nil {
return err
}
defer out.Close()

if _, err = io.Copy(out, rc); err != nil {
return err
}
// unarchiveLayoutTo accepts an archived oci layout and extracts the contents to an existing oci layout, preserving the index
func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string, tempOverride string) error {
tmpdir, err := os.MkdirTemp(tempOverride, "hauler")
if err != nil {
return err
}
defer os.RemoveAll(tmpdir)

if err := archives.Unarchive(ctx, haulPath, tempDir); err != nil {
if err := archiver.Unarchive(archivePath, tmpdir); err != nil {
return err
}

s, err := store.NewLayout(tempDir)
s, err := store.NewLayout(tmpdir)
if err != nil {
return err
}
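Not from this commit — a stdlib-only sketch of the temporary-directory selection order LoadCmd uses: an explicit override, then an environment variable, then the OS default. The environment variable name here is an assumption standing in for the consts value used in the code above.

package main

import (
	"fmt"
	"os"
)

// tempDirFor creates a scratch directory, preferring an explicit override,
// then the named environment variable, then the platform default.
func tempDirFor(override, envVar string) (string, error) {
	if override == "" {
		override = os.Getenv(envVar) // assumed variable name, e.g. HAULER_TEMP_DIR
	}
	return os.MkdirTemp(override, "hauler")
}

func main() {
	dir, err := tempDirFor("", "HAULER_TEMP_DIR")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(dir)
}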
@@ -1,42 +1,37 @@
package store

import (
"bytes"
"context"
"encoding/json"
"os"
"path"
"path/filepath"
"slices"

referencev3 "github.com/distribution/distribution/v3/reference"
"github.com/google/go-containerregistry/pkg/name"
libv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types"
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/pkg/archives"
"hauler.dev/go/hauler/pkg/consts"
"hauler.dev/go/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/log"
)

// saves a content store to store archives
func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
type SaveOpts struct {
*RootOpts
FileName string
}

func (o *SaveOpts) AddArgs(cmd *cobra.Command) {
f := cmd.Flags()

f.StringVarP(&o.FileName, "filename", "f", "haul.tar.zst", "Name of archive")
}

// SaveCmd
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func SaveCmd(ctx context.Context, o *SaveOpts, outputFile string) error {
l := log.FromContext(ctx)

// maps to handle compression and archival types
compressionMap := archives.CompressionMap
archivalMap := archives.ArchivalMap

// TODO: Support more formats?
// Select the correct compression and archival type based on user input
compression := compressionMap["zst"]
archival := archivalMap["tar"]
a := archiver.NewTarZstd()
a.OverwriteExisting = true

absOutputfile, err := filepath.Abs(o.FileName)
absOutputfile, err := filepath.Abs(outputFile)
if err != nil {
return err
}
@@ -50,194 +45,11 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, r
return err
}

// create the manifest.json file
if err := writeExportsManifest(ctx, ".", o.Platform); err != nil {
return err
}

// create the archive
err = archives.Archive(ctx, ".", absOutputfile, compression, archival)
err = a.Archive([]string{"."}, absOutputfile)
if err != nil {
return err
}

l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName)
return nil
}

type exports struct {
digests []string
records map[string]tarball.Descriptor
}

func writeExportsManifest(ctx context.Context, dir string, platformStr string) error {
l := log.FromContext(ctx)

// validate platform format
platform, err := libv1.ParsePlatform(platformStr)
if err != nil {
return err
}

oci, err := layout.FromPath(dir)
if err != nil {
return err
}

idx, err := oci.ImageIndex()
if err != nil {
return err
}

imx, err := idx.IndexManifest()
if err != nil {
return err
}

x := &exports{
digests: []string{},
records: map[string]tarball.Descriptor{},
}

for _, desc := range imx.Manifests {
l.Debugf("descriptor [%s] = [%s]", desc.Digest.String(), desc.MediaType)
if artifactType := types.MediaType(desc.ArtifactType); artifactType != "" && !artifactType.IsImage() && !artifactType.IsIndex() {
l.Debugf("descriptor [%s] <<< SKIPPING ARTIFACT [%q]", desc.Digest.String(), desc.ArtifactType)
continue
}
if desc.Annotations != nil {
// we only care about images that cosign has added to the layout index
if kind, hasKind := desc.Annotations[consts.KindAnnotationName]; hasKind {
if refName, hasRefName := desc.Annotations["io.containerd.image.name"]; hasRefName {
// branch on image (aka image manifest) or image index
switch kind {
case consts.KindAnnotationImage:
if err := x.record(ctx, idx, desc, refName); err != nil {
return err
}
case consts.KindAnnotationIndex:
l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size)

// when no platform is provided, warn the user of potential mismatch on import
if platform.String() == "" {
l.Warnf("specify an export platform to prevent potential platform mismatch on import of index [%s]", refName)
}

iix, err := idx.ImageIndex(desc.Digest)
if err != nil {
return err
}
ixm, err := iix.IndexManifest()
if err != nil {
return err
}
for _, ixd := range ixm.Manifests {
if ixd.MediaType.IsImage() {
// check if platform is provided, if so, skip anything that doesn't match
if platform.String() != "" {
if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS {
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
continue
}
}

// skip 'unknown' platforms... docker hates
if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" {
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
continue
}

if err := x.record(ctx, iix, ixd, refName); err != nil {
return err
}
}
}
default:
l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind)
}
}
}
}
}

buf := bytes.Buffer{}
mnf := x.describe()
err = json.NewEncoder(&buf).Encode(mnf)
if err != nil {
return err
}

return oci.WriteFile(consts.ImageManifestFile, buf.Bytes(), 0666)
}

func (x *exports) describe() tarball.Manifest {
m := make(tarball.Manifest, len(x.digests))
for i, d := range x.digests {
m[i] = x.records[d]
}
return m
}

func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1.Descriptor, refname string) error {
l := log.FromContext(ctx)

digest := desc.Digest.String()
image, err := index.Image(desc.Digest)
if err != nil {
return err
}

config, err := image.ConfigName()
if err != nil {
return err
}

xd, recorded := x.records[digest]
if !recorded {
// record one export record per digest
x.digests = append(x.digests, digest)
xd = tarball.Descriptor{
Config: path.Join(imagev1.ImageBlobsDir, config.Algorithm, config.Hex),
RepoTags: []string{},
Layers: []string{},
}

layers, err := image.Layers()
if err != nil {
return err
}
for _, layer := range layers {
xl, err := layer.Digest()
if err != nil {
return err
}
xd.Layers = append(xd.Layers[:], path.Join(imagev1.ImageBlobsDir, xl.Algorithm, xl.Hex))
}
}

ref, err := name.ParseReference(refname)
if err != nil {
return err
}

// record tags for the digest, eliminating dupes
switch tag := ref.(type) {
case name.Tag:
named, err := referencev3.ParseNormalizedNamed(refname)
if err != nil {
return err
}
named = referencev3.TagNameOnly(named)
repotag := referencev3.FamiliarString(named)
xd.RepoTags = append(xd.RepoTags[:], repotag)
slices.Sort(xd.RepoTags)
xd.RepoTags = slices.Compact(xd.RepoTags)
ref = tag.Digest(digest)
}

l.Debugf("image [%s]: type=%s, size=%d", ref.Name(), desc.MediaType, desc.Size)
// record export descriptor for the digest
x.records[digest] = xd

l.Infof("saved store [%s] -> [%s]", o.StoreDir, absOutputfile)
return nil
}
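Not from this commit — a stdlib-only sketch of the tag de-duplication performed on the export descriptor's RepoTags with slices.Sort and slices.Compact (Go 1.21+):

package main

import (
	"fmt"
	"slices"
)

// dedupeTags sorts the tags and removes adjacent duplicates, matching the
// slices.Sort / slices.Compact pattern used when recording export descriptors.
func dedupeTags(tags []string) []string {
	slices.Sort(tags)
	return slices.Compact(tags)
}

func main() {
	fmt.Println(dedupeTags([]string{"busybox:latest", "busybox:1.36", "busybox:latest"}))
	// [busybox:1.36 busybox:latest]
}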
@@ -5,7 +5,6 @@ import (
"fmt"
"net/http"
"os"
"strings"

"github.com/distribution/distribution/v3/configuration"
dcontext "github.com/distribution/distribution/v3/context"
@@ -13,43 +12,32 @@ import (
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
"github.com/distribution/distribution/v3/version"
"gopkg.in/yaml.v3"
"github.com/spf13/cobra"

"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/internal/server"
"hauler.dev/go/hauler/pkg/log"
"hauler.dev/go/hauler/pkg/store"
"github.com/rancherfederal/hauler/internal/server"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)

func DefaultRegistryConfig(o *flags.ServeRegistryOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *configuration.Configuration {
cfg := &configuration.Configuration{
Version: "0.1",
Storage: configuration.Storage{
"cache": configuration.Parameters{"blobdescriptor": "inmemory"},
"filesystem": configuration.Parameters{"rootdirectory": o.RootDir},
"maintenance": configuration.Parameters{
"readonly": map[any]any{"enabled": o.ReadOnly},
},
},
}
type ServeRegistryOpts struct {
*RootOpts

if o.TLSCert != "" && o.TLSKey != "" {
cfg.HTTP.TLS.Certificate = o.TLSCert
cfg.HTTP.TLS.Key = o.TLSKey
}

cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
cfg.HTTP.Headers = http.Header{
"X-Content-Type-Options": []string{"nosniff"},
}

cfg.Log.Level = configuration.Loglevel(ro.LogLevel)
cfg.Validation.Manifests.URLs.Allow = []string{".+"}

return cfg
Port int
RootDir string
ConfigFile string
ReadOnly bool
}

func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
func (o *ServeRegistryOpts) AddFlags(cmd *cobra.Command) {
f := cmd.Flags()

f.IntVarP(&o.Port, "port", "p", 5000, "Port to listen on.")
f.StringVar(&o.RootDir, "directory", "registry", "Directory to use for backend. Defaults to $PWD/registry")
f.StringVarP(&o.ConfigFile, "config", "c", "", "Path to a config file, will override all other configs")
f.BoolVar(&o.ReadOnly, "readonly", true, "Run the registry as readonly.")
}

func ServeRegistryCmd(ctx context.Context, o *ServeRegistryOpts, s *store.Layout) error {
l := log.FromContext(ctx)
ctx = dcontext.WithVersion(ctx, version.Version)

@@ -58,14 +46,14 @@ func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.
return err
}

opts := &flags.CopyOpts{}
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry(), ro); err != nil {
opts := &CopyOpts{}
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry()); err != nil {
return err
}

tr.Close()

cfg := DefaultRegistryConfig(o, rso, ro)
cfg := o.defaultRegistryConfig()
if o.ConfigFile != "" {
ucfg, err := loadConfig(o.ConfigFile)
if err != nil {
@@ -75,16 +63,6 @@ func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.
}

l.Infof("starting registry on port [%d]", o.Port)

yamlConfig, err := yaml.Marshal(cfg)
if err != nil {
l.Errorf("failed to validate/output registry configuration: %v", err)
} else {
l.Infof("using registry configuration... \n%s", strings.TrimSpace(string(yamlConfig)))
}

l.Debugf("detailed registry configuration: %+v", cfg)

r, err := server.NewRegistry(ctx, cfg)
if err != nil {
return err
@@ -97,30 +75,45 @@ func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.
return nil
}

func ServeFilesCmd(ctx context.Context, o *flags.ServeFilesOpts, s *store.Layout, ro *flags.CliRootOpts) error {
type ServeFilesOpts struct {
*RootOpts

Port int
Timeout int
RootDir string
}

func (o *ServeFilesOpts) AddFlags(cmd *cobra.Command) {
f := cmd.Flags()

f.IntVarP(&o.Port, "port", "p", 8080, "Port to listen on.")
f.IntVarP(&o.Timeout, "timeout", "t", 60, "Set the http request timeout duration in seconds for both reads and write.")
f.StringVar(&o.RootDir, "directory", "fileserver", "Directory to use for backend. Defaults to $PWD/fileserver")
}

func ServeFilesCmd(ctx context.Context, o *ServeFilesOpts, s *store.Layout) error {
l := log.FromContext(ctx)
ctx = dcontext.WithVersion(ctx, version.Version)

opts := &flags.CopyOpts{}
if err := CopyCmd(ctx, opts, s, "dir://"+o.RootDir, ro); err != nil {
opts := &CopyOpts{}
if err := CopyCmd(ctx, opts, s, "dir://"+o.RootDir); err != nil {
return err
}

f, err := server.NewFile(ctx, *o)
cfg := server.FileConfig{
Root: o.RootDir,
Port: o.Port,
Timeout: o.Timeout,
}

f, err := server.NewFile(ctx, cfg)
if err != nil {
return err
}

if o.TLSCert != "" && o.TLSKey != "" {
l.Infof("starting file server with tls on port [%d]", o.Port)
if err := f.ListenAndServeTLS(o.TLSCert, o.TLSKey); err != nil {
return err
}
} else {
l.Infof("starting file server on port [%d]", o.Port)
if err := f.ListenAndServe(); err != nil {
return err
}
l.Infof("starting file server on port [%d]", o.Port)
if err := f.ListenAndServe(); err != nil {
return err
}

return nil
@@ -134,3 +127,27 @@ func loadConfig(filename string) (*configuration.Configuration, error) {

return configuration.Parse(f)
}

func (o *ServeRegistryOpts) defaultRegistryConfig() *configuration.Configuration {
cfg := &configuration.Configuration{
Version: "0.1",
Storage: configuration.Storage{
"cache": configuration.Parameters{"blobdescriptor": "inmemory"},
"filesystem": configuration.Parameters{"rootdirectory": o.RootDir},
"maintenance": configuration.Parameters{
"readonly": map[any]any{"enabled": o.ReadOnly},
},
},
}

// Add validation configuration
cfg.Validation.Manifests.URLs.Allow = []string{".+"}

cfg.Log.Level = "info"
cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
cfg.HTTP.Headers = http.Header{
"X-Content-Type-Options": []string{"nosniff"},
}

return cfg
}
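Not from this commit — a stdlib-only sketch of one way the fileserver options (root directory, port, request timeout) could map onto a net/http server; the real implementation lives in the project's internal/server package.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// newFileServer builds an http.Server that serves files from root with
// matching read and write timeouts, mirroring the flags shown above.
func newFileServer(root string, port, timeoutSeconds int) *http.Server {
	return &http.Server{
		Addr:         fmt.Sprintf(":%d", port),
		Handler:      http.FileServer(http.Dir(root)),
		ReadTimeout:  time.Duration(timeoutSeconds) * time.Second,
		WriteTimeout: time.Duration(timeoutSeconds) * time.Second,
	}
}

func main() {
	srv := newFileServer("fileserver", 8080, 60)
	fmt.Println(srv.Addr)
}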
@@ -5,51 +5,54 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/spf13/cobra"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
convert "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/convert"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
tchart "hauler.dev/go/hauler/pkg/collection/chart"
|
||||
"hauler.dev/go/hauler/pkg/collection/imagetxt"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
tchart "github.com/rancherfederal/hauler/pkg/collection/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/collection/imagetxt"
|
||||
"github.com/rancherfederal/hauler/pkg/collection/k3s"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/content"
|
||||
"github.com/rancherfederal/hauler/pkg/cosign"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
)
|
||||
|
||||
func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
type SyncOpts struct {
|
||||
*RootOpts
|
||||
ContentFiles []string
|
||||
Key string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.ContentFiles, "files", "f", []string{}, "Path(s) to local content files (Manifests). i.e. '--files ./rke2-files.yml")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for signature verification")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Feature for RGS Carbide customers to fetch collections and content from the Carbide Registry. i.e. '--product rancher=v2.8.5,rke2=v1.28.11+rke2r1'")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specific platform to save. i.e. linux/amd64. Defaults to all if flag is omitted.")
|
||||
f.StringVarP(&o.Registry, "registry", "r", "", "(Optional) Default pull registry for image refs that are not specifying a registry name.")
|
||||
f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specific Product Registry to use. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us).")
|
||||
}
|
||||
|
||||
func SyncCmd(ctx context.Context, o *SyncOpts, s *store.Layout) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
tempOverride := o.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
l.Debugf("using temporary directory at [%s]", tempDir)
|
||||
|
||||
// if passed products, check for a remote manifest to retrieve and use
|
||||
for _, productName := range o.Products {
|
||||
l.Infof("processing product manifest for [%s] to store [%s]", productName, o.StoreDir)
|
||||
parts := strings.Split(productName, "=")
|
||||
// if passed products, check for a remote manifest to retrieve and use.
|
||||
for _, product := range o.Products {
|
||||
l.Infof("processing content file for product: '%s'", product)
|
||||
parts := strings.Split(product, "=")
|
||||
tag := strings.ReplaceAll(parts[1], "+", "-")
|
||||
|
||||
ProductRegistry := o.ProductRegistry // cli flag
|
||||
@@ -59,85 +62,47 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
}
|
||||
|
||||
manifestLoc := fmt.Sprintf("%s/hauler/%s-manifest.yaml:%s", ProductRegistry, parts[0], tag)
|
||||
l.Infof("fetching product manifest from [%s]", manifestLoc)
|
||||
img := v1.Image{
|
||||
l.Infof("retrieving product manifest from: '%s'", manifestLoc)
|
||||
img := v1alpha1.Image{
|
||||
Name: manifestLoc,
|
||||
}
|
||||
err := storeImage(ctx, s, img, o.Platform, rso, ro)
|
||||
err := storeImage(ctx, s, img, o.Platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ExtractCmd(ctx, &flags.ExtractOpts{StoreRootOpts: o.StoreRootOpts}, s, fmt.Sprintf("hauler/%s-manifest.yaml:%s", parts[0], tag))
|
||||
err = ExtractCmd(ctx, &ExtractOpts{RootOpts: o.RootOpts}, s, fmt.Sprintf("hauler/%s-manifest.yaml:%s", parts[0], tag))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileName := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
filename := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
|
||||
fi, err := os.Open(fileName)
|
||||
fi, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
err = processContent(ctx, fi, o, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
// If passed a local manifest, process it
|
||||
for _, fileName := range o.FileName {
|
||||
l.Infof("processing manifest [%s] to store [%s]", fileName, o.StoreDir)
|
||||
|
||||
haulPath := fileName
|
||||
if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
|
||||
l.Debugf("detected remote manifest... starting download... [%s]", haulPath)
|
||||
|
||||
h := getter.NewHttp()
|
||||
parsedURL, err := url.Parse(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := h.Open(ctx, parsedURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
fileName := h.Name(parsedURL)
|
||||
if fileName == "" {
|
||||
fileName = filepath.Base(parsedURL.Path)
|
||||
}
|
||||
haulPath = filepath.Join(tempDir, fileName)
|
||||
|
||||
out, err := os.Create(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, rc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fi, err := os.Open(haulPath)
|
||||
// if passed a local manifest, process it
|
||||
for _, filename := range o.ContentFiles {
|
||||
l.Debugf("processing content file: '%s'", filename)
|
||||
fi, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fi.Close()
|
||||
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
err = processContent(ctx, fi, o, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
func processContent(ctx context.Context, fi *os.File, o *SyncOpts, s *store.Layout) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
reader := yaml.NewYAMLReader(bufio.NewReader(fi))
|
||||
@@ -151,470 +116,179 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
docs = append(docs, raw)
|
||||
}
|
||||
|
||||
for _, doc := range docs {
|
||||
obj, err := content.Load(doc)
|
||||
if err != nil {
|
||||
l.Warnf("skipping syncing due to %v", err)
|
||||
l.Debugf("skipping sync of unknown content")
|
||||
continue
|
||||
}
|
||||
|
||||
gvk := obj.GroupVersionKind()
|
||||
l.Infof("syncing content [%s] with [kind=%s] to store [%s]", gvk.GroupVersion(), gvk.Kind, o.StoreDir)
|
||||
l.Infof("syncing [%s] to store", obj.GroupVersionKind().String())
|
||||
|
||||
switch gvk.Kind {
|
||||
|
||||
case consts.FilesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Files
|
||||
if err := convert.ConvertFiles(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range v1Cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
// TODO: Should type switch instead...
|
||||
switch obj.GroupVersionKind().Kind {
|
||||
case v1alpha1.FilesContentKind:
|
||||
var cfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case consts.ImagesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.Images
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
for _, f := range cfg.Spec.Files {
|
||||
err := storeFile(ctx, s, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Images
|
||||
if err := convert.ConvertImages(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := v1Cfg.GetAnnotations()
|
||||
for _, i := range v1Cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("keyless signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := cfg.GetAnnotations()
|
||||
for _, i := range cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("keyless signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
	case consts.ChartsContentKind:
		switch gvk.Version {
		case "v1alpha1":
			l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)

			var alphaCfg v1alpha1.Charts
			if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
				return err
			}
			var v1Cfg v1.Charts
			if err := convert.ConvertCharts(&alphaCfg, &v1Cfg); err != nil {
				return err
			}
			for _, ch := range v1Cfg.Spec.Charts {
				if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
					return err
				}
			}

		case "v1":
			var cfg v1.Charts
			if err := yaml.Unmarshal(doc, &cfg); err != nil {
				return err
			}
			for _, ch := range cfg.Spec.Charts {
				if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
					return err
				}
			}

		default:
			return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
		}

	case consts.ChartsCollectionKind:
		switch gvk.Version {
		case "v1alpha1":
			l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)

			var alphaCfg v1alpha1.ThickCharts
			if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
				return err
			}
			var v1Cfg v1.ThickCharts
			if err := convert.ConvertThickCharts(&alphaCfg, &v1Cfg); err != nil {
				return err
			}
			for _, chObj := range v1Cfg.Spec.Charts {
				tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
					RepoURL: chObj.RepoURL,
					Version: chObj.Version,
				})
				if err != nil {
					return err
				}
				if _, err := s.AddOCICollection(ctx, tc); err != nil {
					return err
				}
			}

		case "v1":
			var cfg v1.ThickCharts
			if err := yaml.Unmarshal(doc, &cfg); err != nil {
				return err
			}
			for _, chObj := range cfg.Spec.Charts {
				tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
					RepoURL: chObj.RepoURL,
					Version: chObj.Version,
				})
				if err != nil {
					return err
				}
				if _, err := s.AddOCICollection(ctx, tc); err != nil {
					return err
				}
			}

		default:
			return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
		}

	case consts.ImageTxtsContentKind:
		switch gvk.Version {
		case "v1alpha1":
			l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)

			var alphaCfg v1alpha1.ImageTxts
			if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
				return err
			}
			var v1Cfg v1.ImageTxts
			if err := convert.ConvertImageTxts(&alphaCfg, &v1Cfg); err != nil {
				return err
			}
			for _, cfgIt := range v1Cfg.Spec.ImageTxts {
				it, err := imagetxt.New(cfgIt.Ref,
					imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
					imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
				)
				if err != nil {
					return fmt.Errorf("convert ImageTxt %s: %v", v1Cfg.Name, err)
				}
				if _, err := s.AddOCICollection(ctx, it); err != nil {
					return fmt.Errorf("add ImageTxt %s to store: %v", v1Cfg.Name, err)
				}
			}

		case "v1":
			var cfg v1.ImageTxts
			if err := yaml.Unmarshal(doc, &cfg); err != nil {
				return err
			}
			for _, cfgIt := range cfg.Spec.ImageTxts {
				it, err := imagetxt.New(cfgIt.Ref,
					imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
					imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
				)
				if err != nil {
					return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
				}
				if _, err := s.AddOCICollection(ctx, it); err != nil {
					return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
				}
			}

		default:
			return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
		}

	default:
		return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts, ThickCharts, ImageTxts]", gvk.Kind)
	}

	case v1alpha1.ImagesContentKind:
		var cfg v1alpha1.Images
		if err := yaml.Unmarshal(doc, &cfg); err != nil {
			return err
		}
		a := cfg.GetAnnotations()
		for _, i := range cfg.Spec.Images {
			// Check if the user provided a registry. If a registry is provided in the annotation, use it for the images that don't have a registry in their ref name.
			if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
				newRef, _ := reference.Parse(i.Name)

				newReg := o.Registry // cli flag
				// if no cli flag but there was an annotation, use the annotation.
				if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
					newReg = a[consts.ImageAnnotationRegistry]
				}

				if newRef.Context().RegistryStr() == "" {
					newRef, err = reference.Relocate(i.Name, newReg)
					if err != nil {
						return err
					}
				}
				i.Name = newRef.Name()
			}

			// Check if the user provided a key. The flag from the CLI takes precedence over the annotation. The individual image key takes precedence over both.
			if a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" {
				key := o.Key // cli flag
				// if no cli flag but there was an annotation, use the annotation.
				if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
					key, err = homedir.Expand(a[consts.ImageAnnotationKey])
					if err != nil {
						return err
					}
				}
				// the individual image key trumps all
				if i.Key != "" {
					key, err = homedir.Expand(i.Key)
					if err != nil {
						return err
					}
				}
				l.Debugf("key for image [%s]", key)

				// verify signature using the provided key.
				err := cosign.VerifySignature(ctx, s, key, i.Name)
				if err != nil {
					l.Errorf("signature verification failed for image [%s]. ** hauler will skip adding this image to the store **:\n%v", i.Name, err)
					continue
				}
				l.Infof("signature verified for image [%s]", i.Name)
			}

			// Check if the user provided a platform. The flag from the CLI takes precedence over the annotation. The individual image platform takes precedence over both.
			platform := o.Platform // cli flag
			// if no cli flag but there was an annotation, use the annotation.
			if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
				platform = a[consts.ImageAnnotationPlatform]
			}
			// the individual image platform trumps all
			if i.Platform != "" {
				platform = i.Platform
			}

			err = storeImage(ctx, s, i, platform)
			if err != nil {
				return err
			}
		}
		// sync with local index
		s.CopyAll(ctx, s.OCI, nil)

	case v1alpha1.ChartsContentKind:
		var cfg v1alpha1.Charts
		if err := yaml.Unmarshal(doc, &cfg); err != nil {
			return err
		}
		for _, ch := range cfg.Spec.Charts {
			// TODO: Provide a way to configure syncs
			err := storeChart(ctx, s, ch, &action.ChartPathOptions{})
			if err != nil {
				return err
			}
		}

	case v1alpha1.K3sCollectionKind:
		var cfg v1alpha1.K3s
		if err := yaml.Unmarshal(doc, &cfg); err != nil {
			return err
		}

		k, err := k3s.NewK3s(cfg.Spec.Version)
		if err != nil {
			return err
		}

		if _, err := s.AddOCICollection(ctx, k); err != nil {
			return err
		}

	case v1alpha1.ChartsCollectionKind:
		var cfg v1alpha1.ThickCharts
		if err := yaml.Unmarshal(doc, &cfg); err != nil {
			return err
		}

		for _, cfg := range cfg.Spec.Charts {
			tc, err := tchart.NewThickChart(cfg, &action.ChartPathOptions{
				RepoURL: cfg.RepoURL,
				Version: cfg.Version,
			})
			if err != nil {
				return err
			}
			if _, err := s.AddOCICollection(ctx, tc); err != nil {
				return err
			}
		}

	case v1alpha1.ImageTxtsContentKind:
		var cfg v1alpha1.ImageTxts
		if err := yaml.Unmarshal(doc, &cfg); err != nil {
			return err
		}

		for _, cfgIt := range cfg.Spec.ImageTxts {
			it, err := imagetxt.New(cfgIt.Ref,
				imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
				imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
			)
			if err != nil {
				return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
			}
			if _, err := s.AddOCICollection(ctx, it); err != nil {
				return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
			}
		}

	default:
		return fmt.Errorf("unrecognized content/collection type: %s", obj.GroupVersionKind().String())
	}

	return nil
@@ -5,12 +5,11 @@ import (

	"github.com/spf13/cobra"

	"hauler.dev/go/hauler/internal/flags"
	"hauler.dev/go/hauler/internal/version"
	"github.com/rancherfederal/hauler/internal/version"
)

func addVersion(parent *cobra.Command, ro *flags.CliRootOpts) {
	o := &flags.VersionOpts{}
func addVersion(parent *cobra.Command) {
	var json bool

	cmd := &cobra.Command{
		Use: "version",
@@ -23,7 +22,7 @@ func addVersion(parent *cobra.Command, ro *flags.CliRootOpts) {
			v.FontName = "starwars"
			cmd.SetOut(cmd.OutOrStdout())

			if o.JSON {
			if json {
				out, err := v.JSONString()
				if err != nil {
					return fmt.Errorf("unable to generate JSON from version info: %w", err)
@@ -35,7 +34,7 @@ func addVersion(parent *cobra.Command, ro *flags.CliRootOpts) {
			return nil
		},
	}
	o.AddFlags(cmd)
	cmd.Flags().BoolVar(&json, "json", false, "toggle output in JSON")

	parent.AddCommand(cmd)
}
@@ -2,13 +2,17 @@ package main

import (
	"context"
	"embed"
	"os"

	"hauler.dev/go/hauler/cmd/hauler/cli"
	"hauler.dev/go/hauler/internal/flags"
	"hauler.dev/go/hauler/pkg/log"
	"github.com/rancherfederal/hauler/cmd/hauler/cli"
	"github.com/rancherfederal/hauler/pkg/cosign"
	"github.com/rancherfederal/hauler/pkg/log"
)

//go:embed binaries/*
var binaries embed.FS

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
@@ -16,7 +20,13 @@ func main() {
	logger := log.NewLogger(os.Stdout)
	ctx = logger.WithContext(ctx)

	if err := cli.New(ctx, &flags.CliRootOpts{}).ExecuteContext(ctx); err != nil {
	// ensure cosign binary is available
	if err := cosign.EnsureBinaryExists(ctx, binaries); err != nil {
		logger.Errorf("%v", err)
		os.Exit(1)
	}

	if err := cli.New().ExecuteContext(ctx); err != nil {
		logger.Errorf("%v", err)
		cancel()
		os.Exit(1)
362
go.mod
@@ -1,337 +1,175 @@
|
||||
module hauler.dev/go/hauler
|
||||
module github.com/rancherfederal/hauler
|
||||
|
||||
go 1.23.4
|
||||
|
||||
toolchain go1.24.2
|
||||
|
||||
replace github.com/sigstore/cosign/v2 => github.com/hauler-dev/cosign/v2 v2.4.3-0.20250404165522-3a44ef646a65
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be
|
||||
github.com/containerd/containerd v1.7.27
|
||||
github.com/containerd/containerd v1.7.11
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
github.com/google/go-containerregistry v0.20.3
|
||||
github.com/docker/go-metrics v0.0.1
|
||||
github.com/google/go-containerregistry v0.16.1
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/mholt/archives v0.1.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/mholt/archiver/v3 v3.5.1
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/zerolog v1.31.0
|
||||
github.com/sigstore/cosign/v2 v2.4.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.11.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
golang.org/x/sync v0.12.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.17.3
|
||||
k8s.io/apimachinery v0.32.2
|
||||
k8s.io/client-go v0.32.2
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
golang.org/x/sync v0.6.0
|
||||
helm.sh/helm/v3 v3.14.2
|
||||
k8s.io/apimachinery v0.29.0
|
||||
k8s.io/client-go v0.29.0
|
||||
oras.land/oras-go v1.2.5
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.14.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 // indirect
|
||||
cuelang.org/go v0.12.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/BurntSushi/toml v1.3.2 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
|
||||
github.com/STARRY-S/zip v0.2.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
|
||||
github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.0 // indirect
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
|
||||
github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
|
||||
github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
|
||||
github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect
|
||||
github.com/alibabacloud-go/debug v1.0.0 // indirect
|
||||
github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect
|
||||
github.com/alibabacloud-go/openapi-util v0.1.0 // indirect
|
||||
github.com/alibabacloud-go/tea v1.2.1 // indirect
|
||||
github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
|
||||
github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
|
||||
github.com/aliyun/credentials-go v1.3.2 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.34.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.55 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
|
||||
github.com/andybalholm/brotli v1.0.1 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.0 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.91.0 // indirect
|
||||
github.com/buildkite/go-pipeline v0.13.3 // indirect
|
||||
github.com/buildkite/interpolate v0.1.5 // indirect
|
||||
github.com/buildkite/roko v1.3.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.12.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v27.5.0+incompatible // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v27.5.0+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/docker v25.0.6+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/proto v1.13.4 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.16.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-chi/chi v4.1.2+incompatible // indirect
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.23.0 // indirect
|
||||
github.com/go-openapi/errors v0.22.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/loads v0.22.0 // indirect
|
||||
github.com/go-openapi/runtime v0.28.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/strfmt v0.23.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-openapi/validate v0.24.0 // indirect
|
||||
github.com/go-piv/piv-go/v2 v2.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.2 // indirect
|
||||
github.com/gomodule/redigo v1.8.2 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/certificate-transparency-go v1.3.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-github/v55 v55.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.0 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.9.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/magiconair/properties v1.8.9 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oleiade/reflections v1.1.0 // indirect
|
||||
github.com/open-policy-agent/opa v1.1.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/nwaples/rardecode v1.1.0 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.5.2 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sassoftware/relic v7.2.1+incompatible // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sigstore/fulcio v1.6.6 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.4.0 // indirect
|
||||
github.com/sigstore/rekor v1.3.9 // indirect
|
||||
github.com/sigstore/sigstore v1.8.12 // indirect
|
||||
github.com/sigstore/sigstore-go v0.7.0 // indirect
|
||||
github.com/sigstore/timestamp-authority v1.2.4 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.5 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/viper v1.19.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
|
||||
github.com/thales-e-security/pool v0.0.2 // indirect
|
||||
github.com/therootcompany/xz v1.0.1 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.7.0 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.0.2 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tjfoc/gmsm v1.4.1 // indirect
|
||||
github.com/transparency-dev/merkle v0.0.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||
github.com/vbatts/tar-split v0.11.6 // indirect
|
||||
github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/ulikunitz/xz v0.5.9 // indirect
|
||||
github.com/vbatts/tar-split v0.11.3 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
gitlab.com/gitlab-org/api/client-go v0.121.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/api v0.219.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 // indirect
|
||||
google.golang.org/grpc v1.70.0 // indirect
|
||||
google.golang.org/protobuf v1.36.4 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/otel v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.19.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.10.0 // indirect
|
||||
golang.org/x/sys v0.18.0 // indirect
|
||||
golang.org/x/term v0.18.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.32.2 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.2 // indirect
|
||||
k8s.io/apiserver v0.32.2 // indirect
|
||||
k8s.io/cli-runtime v0.32.2 // indirect
|
||||
k8s.io/component-base v0.32.2 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/kubectl v0.32.2 // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/release-utils v0.11.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/api v0.29.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.29.0 // indirect
|
||||
k8s.io/apiserver v0.29.0 // indirect
|
||||
k8s.io/cli-runtime v0.29.0 // indirect
|
||||
k8s.io/component-base v0.29.0 // indirect
|
||||
k8s.io/klog/v2 v2.110.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
|
||||
k8s.io/kubectl v0.29.0 // indirect
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
40
install.sh
@@ -17,10 +17,6 @@
|
||||
# - curl -sfL https://get.hauler.dev | HAULER_INSTALL_DIR=/usr/local/bin bash
|
||||
# - HAULER_INSTALL_DIR=/usr/local/bin ./install.sh
|
||||
#
|
||||
# Set Hauler Directory
|
||||
# - curl -sfL https://get.hauler.dev | HAULER_DIR=$HOME/.hauler bash
|
||||
# - HAULER_DIR=$HOME/.hauler ./install.sh
|
||||
#
|
||||
# Debug Usage:
|
||||
# - curl -sfL https://get.hauler.dev | HAULER_DEBUG=true bash
|
||||
# - HAULER_DEBUG=true ./install.sh
|
||||
@@ -69,7 +65,7 @@ done
|
||||
# set install directory from argument or environment variable
|
||||
HAULER_INSTALL_DIR=${HAULER_INSTALL_DIR:-/usr/local/bin}
|
||||
|
||||
# ensure install directory exists and/or create it
|
||||
# ensure install directory exists
|
||||
if [ ! -d "${HAULER_INSTALL_DIR}" ]; then
|
||||
mkdir -p "${HAULER_INSTALL_DIR}" || fatal "Failed to Create Install Directory: ${HAULER_INSTALL_DIR}"
|
||||
fi
|
||||
@@ -86,8 +82,8 @@ if [ "${HAULER_UNINSTALL}" = "true" ]; then
|
||||
# remove the hauler binary
|
||||
rm -rf "${HAULER_INSTALL_DIR}/hauler" || fatal "Failed to Remove Hauler from ${HAULER_INSTALL_DIR}"
|
||||
|
||||
# remove the hauler directory
|
||||
rm -rf "${HAULER_DIR}" || fatal "Failed to Remove Hauler Directory: ${HAULER_DIR}"
|
||||
# remove the working directory
|
||||
rm -rf "$HOME/.hauler" || fatal "Failed to Remove Hauler Directory: $HOME/.hauler"
|
||||
|
||||
info "Successfully Uninstalled Hauler" && echo
|
||||
exit 0
|
||||
@@ -96,11 +92,11 @@ fi
|
||||
# set version environment variable
|
||||
if [ -z "${HAULER_VERSION}" ]; then
|
||||
# attempt to retrieve the latest version from GitHub
|
||||
HAULER_VERSION=$(curl -sI https://github.com/hauler-dev/hauler/releases/latest | grep -i location | sed -e 's#.*tag/v##' -e 's/^[[:space:]]*//g' -e 's/[[:space:]]*$//g')
|
||||
HAULER_VERSION=$(curl -s https://api.github.com/repos/hauler-dev/hauler/releases/latest | grep '"tag_name":' | sed 's/.*"v\([^"]*\)".*/\1/')
|
||||
|
||||
# exit if the version could not be detected
|
||||
if [ -z "${HAULER_VERSION}" ]; then
|
||||
fatal "HAULER_VERSION is unable to be detected and/or retrieved from GitHub. Please set: HAULER_VERSION"
|
||||
fatal "HAULER_VERSION is unable to be detected and/or retrieved from GitHub"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -114,7 +110,7 @@ case $PLATFORM in
|
||||
PLATFORM="darwin"
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported Platform: ${PLATFORM}"
|
||||
fatal "Unsupported Platform: $PLATFORM"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -128,33 +124,29 @@ case $ARCH in
|
||||
ARCH="arm64"
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported Architecture: ${ARCH}"
|
||||
fatal "Unsupported Architecture: $ARCH"
|
||||
;;
|
||||
esac
|
||||
|
||||
# set hauler directory from argument or environment variable
|
||||
HAULER_DIR=${HAULER_DIR:-$HOME/.hauler}
|
||||
|
||||
# start hauler installation
|
||||
info "Starting Installation..."
|
||||
|
||||
# display the version, platform, and architecture
|
||||
verbose "- Version: v${HAULER_VERSION}"
|
||||
verbose "- Platform: ${PLATFORM}"
|
||||
verbose "- Architecture: ${ARCH}"
|
||||
verbose "- Platform: $PLATFORM"
|
||||
verbose "- Architecture: $ARCH"
|
||||
verbose "- Install Directory: ${HAULER_INSTALL_DIR}"
|
||||
verbose "- Hauler Directory: ${HAULER_DIR}"
|
||||
|
||||
# ensure hauler directory exists and/or create it
|
||||
if [ ! -d "${HAULER_DIR}" ]; then
|
||||
mkdir -p "${HAULER_DIR}" || fatal "Failed to Create Hauler Directory: ${HAULER_DIR}"
|
||||
# check working directory and/or create it
|
||||
if [ ! -d "$HOME/.hauler" ]; then
|
||||
mkdir -p "$HOME/.hauler" || fatal "Failed to Create Directory: $HOME/.hauler"
|
||||
fi
|
||||
|
||||
# ensure hauler directory is writable (by user or root privileges)
|
||||
chmod -R 777 "${HAULER_DIR}" || fatal "Failed to Update Permissions of Hauler Directory: ${HAULER_DIR}"
|
||||
# update permissions of working directory
|
||||
chmod -R 777 "$HOME/.hauler" || fatal "Failed to Update Permissions of Directory: $HOME/.hauler"
|
||||
|
||||
# change to hauler directory
|
||||
cd "${HAULER_DIR}" || fatal "Failed to Change Directory to Hauler Directory: ${HAULER_DIR}"
|
||||
# change to working directory
|
||||
cd "$HOME/.hauler" || fatal "Failed to Change Directory: $HOME/.hauler"
|
||||
|
||||
# start hauler artifacts download
|
||||
info "Starting Download..."
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
)
|
||||
|
||||
type AddImageOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Tlog bool
|
||||
Platform string
|
||||
}
|
||||
|
||||
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
}
|
||||
|
||||
type AddFileOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
}
|
||||
|
||||
func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Name, "name", "n", "", "(Optional) Rewrite the name of the file")
|
||||
}
|
||||
|
||||
type AddChartOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
ChartOpts *action.ChartPathOptions
|
||||
}
|
||||
|
||||
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVar(&o.ChartOpts.RepoURL, "repo", "", "Location of the chart (https:// | http:// | oci://)")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "(Optional) Specify the version of the chart (v1.0.0 | 2.0.0 | ^2.0.0)")
|
||||
f.BoolVar(&o.ChartOpts.Verify, "verify", false, "(Optional) Verify the chart before fetching it")
|
||||
f.StringVar(&o.ChartOpts.Username, "username", "", "(Optional) Username to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.Password, "password", "", "(Optional) Password to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "(Optional) Location of the TLS Certificate to use for client authentication")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "(Optional) Location of the TLS Key to use for client authentication")
|
||||
f.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "(Optional) Skip TLS certificate verification")
|
||||
f.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "(Optional) Location of CA Bundle to enable certification verification")
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type CliRootOpts struct {
|
||||
LogLevel string
|
||||
HaulerDir string
|
||||
IgnoreErrors bool
|
||||
}
|
||||
|
||||
func AddRootFlags(cmd *cobra.Command, ro *CliRootOpts) {
|
||||
pf := cmd.PersistentFlags()
|
||||
|
||||
pf.StringVarP(&ro.LogLevel, "log-level", "l", "info", "Set the logging level (i.e. info, debug, warn)")
|
||||
pf.StringVarP(&ro.HaulerDir, "haulerdir", "d", "", "Set the location of the hauler directory (default $HOME/.hauler)")
|
||||
pf.BoolVar(&ro.IgnoreErrors, "ignore-errors", false, "Ignore/Bypass errors (i.e. warn on error) (defaults false)")
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type CopyOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Username string
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
Only string
|
||||
}
|
||||
|
||||
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Optional) Username to use for authentication")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Optional) Password to use for authentication")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "(Optional) Allow insecure connections")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "(Optional) Allow plain HTTP connections")
|
||||
f.StringVarP(&o.Only, "only", "o", "", "(Optional) Custom string array to only copy specific 'image' items, this flag is comma delimited. ex: --only=sig,att,sbom")
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type ExtractOpts struct {
|
||||
*StoreRootOpts
|
||||
DestinationDir string
|
||||
}
|
||||
|
||||
func (o *ExtractOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.DestinationDir, "output", "o", "", "(Optional) Set the directory to output (defaults to current directory)")
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type InfoOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
OutputFormat string
|
||||
TypeFilter string
|
||||
SizeUnit string
|
||||
ListRepos bool
|
||||
}
|
||||
|
||||
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.OutputFormat, "output", "o", "table", "(Optional) Specify the output format (table | json)")
|
||||
f.StringVarP(&o.TypeFilter, "type", "t", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom)")
|
||||
f.BoolVar(&o.ListRepos, "list-repos", false, "(Optional) List all repository names")
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type LoadOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
TempOverride string
|
||||
}
|
||||
|
||||
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
// On Unix systems, the default is $TMPDIR if non-empty, else /tmp
|
||||
// On Windows, the default is GetTempPath, returning the first value from %TMP%, %TEMP%, %USERPROFILE%, or Windows directory
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerArchiveName}, "(Optional) Specify the name of inputted haul(s)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directory determined by the OS")
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type SaveOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName string
|
||||
Platform string
|
||||
}
|
||||
|
||||
func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)")
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type ServeRegistryOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Port int
|
||||
RootDir string
|
||||
ConfigFile string
|
||||
ReadOnly bool
|
||||
|
||||
TLSCert string
|
||||
TLSKey string
|
||||
}
|
||||
|
||||
func (o *ServeRegistryOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", consts.DefaultRegistryPort, "(Optional) Set the port to use for incoming connections")
|
||||
f.StringVar(&o.RootDir, "directory", consts.DefaultRegistryRootDir, "(Optional) Directory to use for backend. Defaults to $PWD/registry")
|
||||
f.StringVarP(&o.ConfigFile, "config", "c", "", "(Optional) Location of config file (overrides all flags)")
|
||||
f.BoolVar(&o.ReadOnly, "readonly", true, "(Optional) Run the registry as readonly")
|
||||
|
||||
f.StringVar(&o.TLSCert, "tls-cert", "", "(Optional) Location of the TLS Certificate to use for server authentication")
|
||||
f.StringVar(&o.TLSKey, "tls-key", "", "(Optional) Location of the TLS Key to use for server authentication")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("tls-cert", "tls-key")
|
||||
}
|
||||
|
||||
type ServeFilesOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Port int
|
||||
Timeout int
|
||||
RootDir string
|
||||
|
||||
TLSCert string
|
||||
TLSKey string
|
||||
}
|
||||
|
||||
func (o *ServeFilesOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", consts.DefaultFileserverPort, "(Optional) Set the port to use for incoming connections")
|
||||
f.IntVarP(&o.Timeout, "timeout", "t", consts.DefaultFileserverTimeout, "(Optional) Timeout duration for HTTP Requests in seconds for both reads/writes")
|
||||
f.StringVar(&o.RootDir, "directory", consts.DefaultFileserverRootDir, "(Optional) Directory to use for backend. Defaults to $PWD/fileserver")
|
||||
|
||||
f.StringVar(&o.TLSCert, "tls-cert", "", "(Optional) Location of the TLS Certificate to use for server authentication")
|
||||
f.StringVar(&o.TLSKey, "tls-key", "", "(Optional) Location of the TLS Key to use for server authentication")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("tls-cert", "tls-key")
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type StoreRootOpts struct {
|
||||
StoreDir string
|
||||
Retries int
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) AddFlags(cmd *cobra.Command) {
|
||||
pf := cmd.PersistentFlags()
|
||||
pf.StringVarP(&o.StoreDir, "store", "s", "", "Set the directory to use for the content store")
|
||||
pf.IntVarP(&o.Retries, "retries", "r", consts.DefaultRetries, "Set the number of retries for operations")
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) Store(ctx context.Context) (*store.Layout, error) {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
storeDir := o.StoreDir
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = os.Getenv(consts.HaulerStoreDir)
|
||||
}
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = consts.DefaultStoreName
|
||||
}
|
||||
|
||||
abs, err := filepath.Abs(storeDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.StoreDir = abs
|
||||
|
||||
l.Debugf("using store at [%s]", abs)
|
||||
|
||||
if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(abs, os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(abs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type SyncOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
TempOverride string
|
||||
Tlog bool
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerManifestName}, "Specify the name of manifest(s) to sync")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Specify the product name to fetch collections from the product registry i.e. rancher=v2.10.1,rke2=v1.31.5+rke2r1")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not already define one")
|
||||
f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specify the product registry. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directory determined by the OS")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type VersionOpts struct {
|
||||
JSON bool
|
||||
}
|
||||
|
||||
func (o *VersionOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.BoolVar(&o.JSON, "json", false, "Set the output format to JSON")
|
||||
}
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"oras.land/oras-go/pkg/target"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type Fn func(desc ocispec.Descriptor) (string, error)
|
||||
@@ -36,7 +36,7 @@ func Images() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
|
||||
manifestMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
return consts.ImageManifestFile, nil
|
||||
return "manifest.json", nil
|
||||
})
|
||||
|
||||
for _, l := range []string{consts.DockerManifestSchema2, consts.DockerManifestListSchema2, consts.OCIManifestSchema1} {
|
||||
@@ -52,7 +52,7 @@ func Images() map[string]Fn {
|
||||
}
|
||||
|
||||
configMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
return consts.ImageConfigFile, nil
|
||||
return "config.json", nil
|
||||
})
|
||||
|
||||
for _, l := range []string{consts.DockerConfigJSON} {
|
||||
|
||||
@@ -9,25 +9,30 @@ import (
	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
	"hauler.dev/go/hauler/internal/flags"
	"hauler.dev/go/hauler/pkg/consts"
)

type FileConfig struct {
	Root    string
	Host    string
	Port    int
	Timeout int
}

// NewFile returns a fileserver
// TODO: Better configs
func NewFile(ctx context.Context, cfg flags.ServeFilesOpts) (Server, error) {
func NewFile(ctx context.Context, cfg FileConfig) (Server, error) {
	r := mux.NewRouter()
	r.PathPrefix("/").Handler(handlers.LoggingHandler(os.Stdout, http.StripPrefix("/", http.FileServer(http.Dir(cfg.RootDir)))))
	if cfg.RootDir == "" {
		cfg.RootDir = "."
	r.PathPrefix("/").Handler(handlers.LoggingHandler(os.Stdout, http.StripPrefix("/", http.FileServer(http.Dir(cfg.Root)))))
	if cfg.Root == "" {
		cfg.Root = "."
	}

	if cfg.Port == 0 {
		cfg.Port = consts.DefaultFileserverPort
		cfg.Port = 8080
	}

	if cfg.Timeout == 0 {
		cfg.Timeout = consts.DefaultFileserverTimeout
		cfg.Timeout = 60
	}

	srv := &http.Server{
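// Illustrative sketch (not part of the diff): a minimal standalone file server
// wired the same way as NewFile above (gorilla/mux router, LoggingHandler,
// http.FileServer) with the defaulting the hunk shows (".", 8080, 60s). The
// values here are placeholders, not Hauler configuration.
package main

import (
	"net/http"
	"os"
	"time"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	root, addr, timeout := ".", ":8080", 60*time.Second

	r := mux.NewRouter()
	r.PathPrefix("/").Handler(handlers.LoggingHandler(os.Stdout,
		http.StripPrefix("/", http.FileServer(http.Dir(root)))))

	srv := &http.Server{
		Addr:         addr,
		Handler:      r,
		ReadTimeout:  timeout,
		WriteTimeout: timeout,
	}
	// Serves the contents of root over HTTP until the process exits.
	if err := srv.ListenAndServe(); err != nil {
		panic(err)
	}
}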
@@ -11,6 +11,7 @@ import (
	"github.com/distribution/distribution/v3/configuration"
	"github.com/distribution/distribution/v3/registry"
	"github.com/distribution/distribution/v3/registry/handlers"
	"github.com/docker/go-metrics"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
@@ -21,6 +22,14 @@ func NewRegistry(ctx context.Context, cfg *configuration.Configuration) (*regist
		return nil, err
	}

	if cfg.HTTP.Debug.Prometheus.Enabled {
		path := cfg.HTTP.Debug.Prometheus.Path
		if path == "" {
			path = "/metrics"
		}
		http.Handle(path, metrics.Handler())
	}

	return r, nil
}

@@ -36,7 +45,7 @@ func NewTempRegistry(ctx context.Context, root string) *tmpRegistryServer {
			"filesystem": configuration.Parameters{"rootdirectory": root},
		},
	}

	// Add validation configuration
	cfg.Validation.Manifests.URLs.Allow = []string{".+"}

	cfg.Log.Level = "error"
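// Illustrative sketch (not part of the diff): enabling the Prometheus debug
// endpoint that the NewRegistry hunk above checks. The field paths mirror how
// the hunk reads them (cfg.HTTP.Debug.Prometheus.Enabled / .Path); the listen
// addresses are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/configuration"
)

func main() {
	cfg := &configuration.Configuration{}
	cfg.HTTP.Addr = ":5000"
	cfg.HTTP.Debug.Addr = ":5001"
	cfg.HTTP.Debug.Prometheus.Enabled = true
	cfg.HTTP.Debug.Prometheus.Path = "/metrics"

	// With this configuration, the hunk above registers metrics.Handler()
	// on /metrics before returning the registry.
	fmt.Println(cfg.HTTP.Debug.Prometheus.Enabled, cfg.HTTP.Debug.Prometheus.Path)
}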
@@ -2,5 +2,4 @@ package server

type Server interface {
	ListenAndServe() error
	ListenAndServeTLS(string, string) error
}
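// Illustrative note (not part of the diff): the standard library's *http.Server
// already provides both methods of the Server interface above, so a
// compile-time assertion is enough to confirm a returned *http.Server satisfies
// it. This assumes the assertion lives in the same server package.
package server

import "net/http"

// Compile-time guard; fails to build if *http.Server stops matching Server.
var _ Server = (*http.Server)(nil)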
@@ -28,9 +28,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/common-nighthawk/go-figure"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
const unknown = "unknown"
|
||||
|
||||
// Base version information.
|
||||
//
|
||||
// This is the fallback data used when version information from git is not
|
||||
@@ -40,19 +41,19 @@ var (
|
||||
// branch should be tagged using the correct versioning strategy.
|
||||
gitVersion = "devel"
|
||||
// SHA1 from git, output of $(git rev-parse HEAD)
|
||||
gitCommit = consts.Unknown
|
||||
gitCommit = unknown
|
||||
// State of git tree, either "clean" or "dirty"
|
||||
gitTreeState = consts.Unknown
|
||||
gitTreeState = unknown
|
||||
// Build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
buildDate = consts.Unknown
|
||||
buildDate = unknown
|
||||
// flag to print the ascii name banner
|
||||
asciiName = "true"
|
||||
// goVersion is the used golang version.
|
||||
goVersion = consts.Unknown
|
||||
goVersion = unknown
|
||||
// compiler is the used golang compiler.
|
||||
compiler = consts.Unknown
|
||||
compiler = unknown
|
||||
// platform is the used os/arch identifier.
|
||||
platform = consts.Unknown
|
||||
platform = unknown
|
||||
|
||||
once sync.Once
|
||||
info = Info{}
|
||||
@@ -83,7 +84,7 @@ func getBuildInfo() *debug.BuildInfo {
|
||||
|
||||
func getGitVersion(bi *debug.BuildInfo) string {
|
||||
if bi == nil {
|
||||
return consts.Unknown
|
||||
return unknown
|
||||
}
|
||||
|
||||
// TODO: remove this when the issue https://github.com/golang/go/issues/29228 is fixed
|
||||
@@ -106,28 +107,28 @@ func getDirty(bi *debug.BuildInfo) string {
|
||||
if modified == "false" {
|
||||
return "clean"
|
||||
}
|
||||
return consts.Unknown
|
||||
return unknown
|
||||
}
|
||||
|
||||
func getBuildDate(bi *debug.BuildInfo) string {
|
||||
buildTime := getKey(bi, "vcs.time")
|
||||
t, err := time.Parse("2006-01-02T15:04:05Z", buildTime)
|
||||
if err != nil {
|
||||
return consts.Unknown
|
||||
return unknown
|
||||
}
|
||||
return t.Format("2006-01-02T15:04:05")
|
||||
}
|
||||
|
||||
func getKey(bi *debug.BuildInfo, key string) string {
|
||||
if bi == nil {
|
||||
return consts.Unknown
|
||||
return unknown
|
||||
}
|
||||
for _, iter := range bi.Settings {
|
||||
if iter.Key == key {
|
||||
return iter.Value
|
||||
}
|
||||
}
|
||||
return consts.Unknown
|
||||
return unknown
|
||||
}
|
||||
|
||||
// GetVersionInfo represents known information on how this binary was built.
|
||||
@@ -135,27 +136,27 @@ func GetVersionInfo() Info {
|
||||
once.Do(func() {
|
||||
buildInfo := getBuildInfo()
|
||||
gitVersion = getGitVersion(buildInfo)
|
||||
if gitCommit == consts.Unknown {
|
||||
if gitCommit == unknown {
|
||||
gitCommit = getCommit(buildInfo)
|
||||
}
|
||||
|
||||
if gitTreeState == consts.Unknown {
|
||||
if gitTreeState == unknown {
|
||||
gitTreeState = getDirty(buildInfo)
|
||||
}
|
||||
|
||||
if buildDate == consts.Unknown {
|
||||
if buildDate == unknown {
|
||||
buildDate = getBuildDate(buildInfo)
|
||||
}
|
||||
|
||||
if goVersion == consts.Unknown {
|
||||
if goVersion == unknown {
|
||||
goVersion = runtime.Version()
|
||||
}
|
||||
|
||||
if compiler == consts.Unknown {
|
||||
if compiler == unknown {
|
||||
compiler = runtime.Compiler
|
||||
}
|
||||
|
||||
if platform == consts.Unknown {
|
||||
if platform == unknown {
|
||||
platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
)
|
||||
|
||||
// converts v1alpha1.Files -> v1.Files
|
||||
func ConvertFiles(in *v1alpha1.Files, out *v1.Files) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Files = make([]v1.File, len(in.Spec.Files))
|
||||
for i := range in.Spec.Files {
|
||||
out.Spec.Files[i].Name = in.Spec.Files[i].Name
|
||||
out.Spec.Files[i].Path = in.Spec.Files[i].Path
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.Images -> v1.Images
|
||||
func ConvertImages(in *v1alpha1.Images, out *v1.Images) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Images = make([]v1.Image, len(in.Spec.Images))
|
||||
for i := range in.Spec.Images {
|
||||
out.Spec.Images[i].Name = in.Spec.Images[i].Name
|
||||
out.Spec.Images[i].Platform = in.Spec.Images[i].Platform
|
||||
out.Spec.Images[i].Key = in.Spec.Images[i].Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.Charts -> v1.Charts
|
||||
func ConvertCharts(in *v1alpha1.Charts, out *v1.Charts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Charts = make([]v1.Chart, len(in.Spec.Charts))
|
||||
for i := range in.Spec.Charts {
|
||||
out.Spec.Charts[i].Name = in.Spec.Charts[i].Name
|
||||
out.Spec.Charts[i].RepoURL = in.Spec.Charts[i].RepoURL
|
||||
out.Spec.Charts[i].Version = in.Spec.Charts[i].Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.ThickCharts -> v1.ThickCharts
|
||||
func ConvertThickCharts(in *v1alpha1.ThickCharts, out *v1.ThickCharts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Charts = make([]v1.ThickChart, len(in.Spec.Charts))
|
||||
for i := range in.Spec.Charts {
|
||||
out.Spec.Charts[i].Chart.Name = in.Spec.Charts[i].Chart.Name
|
||||
out.Spec.Charts[i].Chart.RepoURL = in.Spec.Charts[i].Chart.RepoURL
|
||||
out.Spec.Charts[i].Chart.Version = in.Spec.Charts[i].Chart.Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.ImageTxts -> v1.ImageTxts
|
||||
func ConvertImageTxts(in *v1alpha1.ImageTxts, out *v1.ImageTxts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.ImageTxts = make([]v1.ImageTxt, len(in.Spec.ImageTxts))
|
||||
for i := range in.Spec.ImageTxts {
|
||||
out.Spec.ImageTxts[i].Ref = in.Spec.ImageTxts[i].Ref
|
||||
out.Spec.ImageTxts[i].Sources.Include = append(
|
||||
out.Spec.ImageTxts[i].Sources.Include,
|
||||
in.Spec.ImageTxts[i].Sources.Include...,
|
||||
)
|
||||
out.Spec.ImageTxts[i].Sources.Exclude = append(
|
||||
out.Spec.ImageTxts[i].Sources.Exclude,
|
||||
in.Spec.ImageTxts[i].Sources.Exclude...,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert v1alpha1 object to v1 object
|
||||
func ConvertObject(in interface{}) (interface{}, error) {
|
||||
switch src := in.(type) {
|
||||
|
||||
case *v1alpha1.Files:
|
||||
dst := &v1.Files{}
|
||||
if err := ConvertFiles(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.Images:
|
||||
dst := &v1.Images{}
|
||||
if err := ConvertImages(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.Charts:
|
||||
dst := &v1.Charts{}
|
||||
if err := ConvertCharts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.ThickCharts:
|
||||
dst := &v1.ThickCharts{}
|
||||
if err := ConvertThickCharts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.ImageTxts:
|
||||
dst := &v1.ImageTxts{}
|
||||
if err := ConvertImageTxts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported object type [%T]", in)
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Charts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ChartSpec struct {
|
||||
Charts []Chart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type Chart struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type ThickCharts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ThickChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChartSpec struct {
|
||||
Charts []ThickChart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChart struct {
|
||||
Chart `json:",inline,omitempty"`
|
||||
ExtraImages []ChartImage `json:"extraImages,omitempty"`
|
||||
}
|
||||
|
||||
type ChartImage struct {
|
||||
Reference string `json:"ref"`
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Driver struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec DriverSpec `json:"spec"`
|
||||
}
|
||||
|
||||
type DriverSpec struct {
|
||||
Type string `json:"type"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Files struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec FileSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type FileSpec struct {
|
||||
Files []File `json:"files,omitempty"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
// Path is the path to the file contents, can be a local or remote path
|
||||
Path string `json:"path"`
|
||||
|
||||
// Name is an optional field specifying the name of the file when specified,
|
||||
// it will override any dynamic name discovery from Path
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var (
|
||||
ContentGroupVersion = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1"}
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1"}
|
||||
)
|
||||
@@ -1,40 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Images struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageSpec struct {
|
||||
Images []Image `json:"images,omitempty"`
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
// Name is the full location for the image, can be referenced by tags or digests
|
||||
Name string `json:"name"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Key string `json:"key,omitempty"`
|
||||
Key string `json:"key"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Tlog string `json:"use-tlog-verify,omitempty"`
|
||||
Tlog bool `json:"use-tlog-verify"`
|
||||
|
||||
// cosign keyless validation options
|
||||
CertIdentity string `json:"certificate-identity"`
|
||||
CertIdentityRegexp string `json:"certificate-identity-regexp"`
|
||||
CertOidcIssuer string `json:"certificate-oidc-issuer"`
|
||||
CertOidcIssuerRegexp string `json:"certificate-oidc-issuer-regexp"`
|
||||
CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`
|
||||
|
||||
// Platform of the image to be pulled. If not specified, all platforms will be pulled.
|
||||
//Platform string `json:"key,omitempty"`
|
||||
Platform string `json:"platform"`
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type ImageTxts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageTxtsSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtsSpec struct {
|
||||
ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxt struct {
|
||||
Ref string `json:"ref,omitempty"`
|
||||
Sources ImageTxtSources `json:"sources,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtSources struct {
|
||||
Include []string `json:"include,omitempty"`
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
@@ -4,6 +4,11 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
ChartsContentKind = "Charts"
|
||||
ChartsCollectionKind = "ThickCharts"
|
||||
)
|
||||
|
||||
type Charts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -4,6 +4,10 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
DriverContentKind = "Driver"
|
||||
)
|
||||
|
||||
type Driver struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const FilesContentKind = "Files"
|
||||
|
||||
type Files struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -2,11 +2,17 @@ package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
const (
|
||||
Version = "v1alpha1"
|
||||
ContentGroup = "content.hauler.cattle.io"
|
||||
CollectionGroup = "collection.hauler.cattle.io"
|
||||
)
|
||||
|
||||
var (
|
||||
ContentGroupVersion = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1alpha1"}
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1alpha1"}
|
||||
ContentGroupVersion = schema.GroupVersion{Group: ContentGroup, Version: Version}
|
||||
// SchemeBuilder = &scheme.Builder{GroupVersion: ContentGroupVersion}
|
||||
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: CollectionGroup, Version: Version}
|
||||
)
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const ImagesContentKind = "Images"
|
||||
|
||||
type Images struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
@@ -23,17 +25,6 @@ type Image struct {
|
||||
//Key string `json:"key,omitempty"`
|
||||
Key string `json:"key"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Tlog string `json:"use-tlog-verify,omitempty"`
|
||||
Tlog bool `json:"use-tlog-verify"`
|
||||
|
||||
// cosign keyless validation options
|
||||
CertIdentity string `json:"certificate-identity"`
|
||||
CertIdentityRegexp string `json:"certificate-identity-regexp"`
|
||||
CertOidcIssuer string `json:"certificate-oidc-issuer"`
|
||||
CertOidcIssuerRegexp string `json:"certificate-oidc-issuer-regexp"`
|
||||
CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`
|
||||
|
||||
// Platform of the image to be pulled. If not specified, all platforms will be pulled.
|
||||
//Platform string `json:"key,omitempty"`
|
||||
Platform string `json:"platform"`
|
||||
|
||||
@@ -4,6 +4,10 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
ImageTxtsContentKind = "ImageTxts"
|
||||
)
|
||||
|
||||
type ImageTxts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
19
pkg/apis/hauler.cattle.io/v1alpha1/k3s.go
Normal file
@@ -0,0 +1,19 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const K3sCollectionKind = "K3s"

type K3s struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec K3sSpec `json:"spec,omitempty"`
}

type K3sSpec struct {
	Version string `json:"version"`
	Arch    string `json:"arch"`
}
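// Illustrative sketch (not part of the diff): constructing the K3s collection
// type added above. The apiVersion string follows the collection group used
// elsewhere in this changeset, the module path is taken from one side of the
// diff, and the version/arch values are assumed examples.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)

func main() {
	k := v1alpha1.K3s{
		TypeMeta: &metav1.TypeMeta{
			APIVersion: "collection.hauler.cattle.io/v1alpha1",
			Kind:       v1alpha1.K3sCollectionKind,
		},
		Spec: v1alpha1.K3sSpec{
			Version: "v1.29.4+k3s1", // assumed example version
			Arch:    "amd64",
		},
	}
	fmt.Println(k.Kind, k.Spec.Version)
}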
@@ -1,104 +0,0 @@
package archives

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/mholt/archives"
	"hauler.dev/go/hauler/pkg/log"
)

// maps to handle compression types
var CompressionMap = map[string]archives.Compression{
	"gz":  archives.Gz{},
	"bz2": archives.Bz2{},
	"xz":  archives.Xz{},
	"zst": archives.Zstd{},
	"lz4": archives.Lz4{},
	"br":  archives.Brotli{},
}

// maps to handle archival types
var ArchivalMap = map[string]archives.Archival{
	"tar": archives.Tar{},
	"zip": archives.Zip{},
}

// check if a path exists
func isExist(path string) bool {
	_, statErr := os.Stat(path)
	return !os.IsNotExist(statErr)
}
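// Illustrative sketch (not part of the diff): using lookup maps like the ones
// above to build an archives.CompressedArchive and write a .tar.zst, the same
// combination the Archive helper later in this file assembles. The input
// directory and output filename are placeholders.
package main

import (
	"context"
	"os"

	"github.com/mholt/archives"
)

func main() {
	compression := archives.Zstd{} // e.g. CompressionMap["zst"]
	archival := archives.Tar{}     // e.g. ArchivalMap["tar"]

	// Map the on-disk directory "store" to the root of the archive.
	files, err := archives.FilesFromDisk(context.Background(), nil, map[string]string{
		"store": "store",
	})
	if err != nil {
		panic(err)
	}

	out, err := os.Create("haul.tar.zst")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	format := archives.CompressedArchive{Compression: compression, Archival: archival}
	if err := format.Archive(context.Background(), out, files); err != nil {
		panic(err)
	}
}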
// archives the files in a directory
|
||||
// dir: the directory to Archive
|
||||
// outfile: the output file
|
||||
// compression: the compression to use (gzip, bzip2, etc.)
|
||||
// archival: the archival to use (tar, zip, etc.)
|
||||
func Archive(ctx context.Context, dir, outfile string, compression archives.Compression, archival archives.Archival) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("starting the archival process for [%s]", dir)
|
||||
|
||||
// remove outfile
|
||||
l.Debugf("removing existing output file: [%s]", outfile)
|
||||
if err := os.RemoveAll(outfile); err != nil {
|
||||
errMsg := fmt.Errorf("failed to remove existing output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
if !isExist(dir) {
|
||||
errMsg := fmt.Errorf("directory [%s] does not exist, cannot proceed with archival", dir)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
// map files on disk to their paths in the archive
|
||||
l.Debugf("mapping files in directory [%s]", dir)
|
||||
archiveDirName := filepath.Base(filepath.Clean(dir))
|
||||
if dir == "." {
|
||||
archiveDirName = ""
|
||||
}
|
||||
files, err := archives.FilesFromDisk(context.Background(), nil, map[string]string{
|
||||
dir: archiveDirName,
|
||||
})
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error mapping files from directory [%s]: %w", dir, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("successfully mapped files for directory [%s]", dir)
|
||||
|
||||
// create the output file we'll write to
|
||||
l.Debugf("creating output file [%s]", outfile)
|
||||
outf, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error creating output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
defer func() {
|
||||
l.Debugf("closing output file [%s]", outfile)
|
||||
outf.Close()
|
||||
}()
|
||||
|
||||
// define the archive format
|
||||
l.Debugf("defining the archive format: [%T]/[%T]", archival, compression)
|
||||
format := archives.CompressedArchive{
|
||||
Compression: compression,
|
||||
Archival: archival,
|
||||
}
|
||||
|
||||
// create the archive
|
||||
l.Debugf("starting archive for [%s]", outfile)
|
||||
err = format.Archive(context.Background(), outf, files)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error during archive creation for output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("archive created successfully [%s]", outfile)
|
||||
return nil
|
||||
}
|
||||
@@ -1,158 +0,0 @@
package archives

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/mholt/archives"
	"hauler.dev/go/hauler/pkg/log"
)

const (
	dirPermissions  = 0o700 // default directory permissions
	filePermissions = 0o600 // default file permissions
)

// ensures the path is safely relative to the target directory
func securePath(basePath, relativePath string) (string, error) {
	relativePath = filepath.Clean("/" + relativePath)
	relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))

	dstPath := filepath.Join(basePath, relativePath)

	if !strings.HasPrefix(filepath.Clean(dstPath)+string(os.PathSeparator), filepath.Clean(basePath)+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal file path: %s", dstPath)
	}
	return dstPath, nil
}
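// Illustrative sketch (not part of the diff): what securePath above does with a
// hostile archive entry. The function body is reproduced verbatim from the hunk
// so the example is self-contained; only the example paths are made up.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func securePath(basePath, relativePath string) (string, error) {
	relativePath = filepath.Clean("/" + relativePath)
	relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))

	dstPath := filepath.Join(basePath, relativePath)

	if !strings.HasPrefix(filepath.Clean(dstPath)+string(os.PathSeparator), filepath.Clean(basePath)+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal file path: %s", dstPath)
	}
	return dstPath, nil
}

func main() {
	// A "zip slip" style entry is flattened back under the destination dir.
	p, err := securePath("/tmp/unpack", "../../etc/passwd")
	fmt.Println(p, err) // /tmp/unpack/etc/passwd <nil>
}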
// creates a directory with specified permissions
|
||||
func createDirWithPermissions(ctx context.Context, path string, mode os.FileMode) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("creating directory [%s]", path)
|
||||
if err := os.MkdirAll(path, mode); err != nil {
|
||||
return fmt.Errorf("failed to mkdir: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sets permissions to a file or directory
|
||||
func setPermissions(path string, mode os.FileMode) error {
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return fmt.Errorf("failed to chmod: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handles the extraction of a file from the archive.
|
||||
func handleFile(ctx context.Context, f archives.FileInfo, dst string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("handling file [%s]", f.NameInArchive)
|
||||
|
||||
// validate and construct the destination path
|
||||
dstPath, pathErr := securePath(dst, f.NameInArchive)
|
||||
if pathErr != nil {
|
||||
return pathErr
|
||||
}
|
||||
|
||||
// ensure the parent directory exists
|
||||
parentDir := filepath.Dir(dstPath)
|
||||
if dirErr := createDirWithPermissions(ctx, parentDir, dirPermissions); dirErr != nil {
|
||||
return dirErr
|
||||
}
|
||||
|
||||
// handle directories
|
||||
if f.IsDir() {
|
||||
// create the directory with permissions from the archive
|
||||
if dirErr := createDirWithPermissions(ctx, dstPath, f.Mode()); dirErr != nil {
|
||||
return fmt.Errorf("failed to create directory: %w", dirErr)
|
||||
}
|
||||
l.Debugf("successfully created directory [%s]", dstPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ignore symlinks (or hardlinks)
|
||||
if f.LinkTarget != "" {
|
||||
l.Debugf("skipping symlink [%s] to [%s]", dstPath, f.LinkTarget)
|
||||
return nil
|
||||
}
|
||||
|
||||
// check and handle parent directory permissions
|
||||
originalMode, statErr := os.Stat(parentDir)
|
||||
if statErr != nil {
|
||||
return fmt.Errorf("failed to stat parent directory: %w", statErr)
|
||||
}
|
||||
|
||||
// if parent directory is read only, temporarily make it writable
|
||||
if originalMode.Mode().Perm()&0o200 == 0 {
|
||||
l.Debugf("parent directory is read only... temporarily making it writable [%s]", parentDir)
|
||||
if chmodErr := os.Chmod(parentDir, originalMode.Mode()|0o200); chmodErr != nil {
|
||||
return fmt.Errorf("failed to chmod parent directory: %w", chmodErr)
|
||||
}
|
||||
defer func() {
|
||||
// restore the original permissions after writing
|
||||
if chmodErr := os.Chmod(parentDir, originalMode.Mode()); chmodErr != nil {
|
||||
l.Debugf("failed to restore original permissions for [%s]: %v", parentDir, chmodErr)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// handle regular files
|
||||
reader, openErr := f.Open()
|
||||
if openErr != nil {
|
||||
return fmt.Errorf("failed to open file: %w", openErr)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
dstFile, createErr := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY, f.Mode())
|
||||
if createErr != nil {
|
||||
return fmt.Errorf("failed to create file: %w", createErr)
|
||||
}
|
||||
defer dstFile.Close()
|
||||
|
||||
if _, copyErr := io.Copy(dstFile, reader); copyErr != nil {
|
||||
return fmt.Errorf("failed to copy: %w", copyErr)
|
||||
}
|
||||
l.Debugf("successfully extracted file [%s]", dstPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// unarchives a tarball to a directory, symlinks, and hardlinks are ignored
|
||||
func Unarchive(ctx context.Context, tarball, dst string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("unarchiving temporary archive [%s] to temporary store [%s]", tarball, dst)
|
||||
archiveFile, openErr := os.Open(tarball)
|
||||
if openErr != nil {
|
||||
return fmt.Errorf("failed to open tarball %s: %w", tarball, openErr)
|
||||
}
|
||||
defer archiveFile.Close()
|
||||
|
||||
format, input, identifyErr := archives.Identify(context.Background(), tarball, archiveFile)
|
||||
if identifyErr != nil {
|
||||
return fmt.Errorf("failed to identify format: %w", identifyErr)
|
||||
}
|
||||
|
||||
extractor, ok := format.(archives.Extractor)
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported format for extraction")
|
||||
}
|
||||
|
||||
if dirErr := createDirWithPermissions(ctx, dst, dirPermissions); dirErr != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", dirErr)
|
||||
}
|
||||
|
||||
handler := func(ctx context.Context, f archives.FileInfo) error {
|
||||
return handleFile(ctx, f, dst)
|
||||
}
|
||||
|
||||
if extractErr := extractor.Extract(context.Background(), input, handler); extractErr != nil {
|
||||
return fmt.Errorf("failed to extract: %w", extractErr)
|
||||
}
|
||||
|
||||
l.Infof("unarchiving completed successfully")
|
||||
return nil
|
||||
}
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var _ partial.Describable = (*marshallableConfig)(nil)
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// interface guard
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/spf13/afero"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -13,8 +13,8 @@ import (
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type directory struct {
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type File struct{}
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
|
||||
content2 "hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/layer"
|
||||
content2 "github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/layer"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
)
|
||||
|
||||
func TestClient_Detect(t *testing.T) {
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type Http struct{}
|
||||
@@ -1,8 +1,8 @@
|
||||
package file
|
||||
|
||||
import (
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
)
|
||||
|
||||
type Option func(*File)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
gv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
)
|
||||
|
||||
var _ artifacts.OCI = (*Image)(nil)
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var _ artifacts.OCI = (*Memory)(nil)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts/memory"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/memory"
|
||||
)
|
||||
|
||||
func TestMemory_Layers(t *testing.T) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package memory
|
||||
|
||||
import "hauler.dev/go/hauler/pkg/artifacts"
|
||||
import "github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
|
||||
type Option func(*Memory)
|
||||
|
||||
|
||||
@@ -3,11 +3,11 @@ package chart
|
||||
import (
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
"github.com/rancherfederal/hauler/pkg/content/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
var _ artifacts.OCICollection = (*tchart)(nil)
|
||||
@@ -15,13 +15,13 @@ var _ artifacts.OCICollection = (*tchart)(nil)
|
||||
// tchart is a thick chart that includes all the dependent images as well as the chart itself
|
||||
type tchart struct {
|
||||
chart *chart.Chart
|
||||
config v1.ThickChart
|
||||
config v1alpha1.ThickChart
|
||||
|
||||
computed bool
|
||||
contents map[string]artifacts.OCI
|
||||
}
|
||||
|
||||
func NewThickChart(cfg v1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
|
||||
func NewThickChart(cfg v1alpha1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
|
||||
o, err := chart.NewChart(cfg.Chart.Name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/client-go/util/jsonpath"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
)
|
||||
|
||||
var defaultKnownImagePaths = []string{
|
||||
@@ -29,13 +29,13 @@ var defaultKnownImagePaths = []string{
|
||||
}
|
||||
|
||||
// ImagesInChart will render a chart and identify all dependent images from it
|
||||
func ImagesInChart(c *helmchart.Chart) (v1.Images, error) {
|
||||
func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
|
||||
docs, err := template(c)
|
||||
if err != nil {
|
||||
return v1.Images{}, err
|
||||
return v1alpha1.Images{}, err
|
||||
}
|
||||
|
||||
var images []v1.Image
|
||||
var images []v1alpha1.Image
|
||||
reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs)))
|
||||
for {
|
||||
raw, err := reader.Read()
|
||||
@@ -43,17 +43,17 @@ func ImagesInChart(c *helmchart.Chart) (v1.Images, error) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return v1.Images{}, err
|
||||
return v1alpha1.Images{}, err
|
||||
}
|
||||
|
||||
found := find(raw, defaultKnownImagePaths...)
|
||||
for _, f := range found {
|
||||
images = append(images, v1.Image{Name: f})
|
||||
images = append(images, v1alpha1.Image{Name: f})
|
||||
}
|
||||
}
|
||||
|
||||
ims := v1.Images{
|
||||
Spec: v1.ImageSpec{
|
||||
ims := v1alpha1.Images{
|
||||
Spec: v1alpha1.ImageSpec{
|
||||
Images: images,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
|
||||
artifact "hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
artifact "github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
)
|
||||
|
||||
type ImageTxt struct {
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
175
pkg/collection/k3s/k3s.go
Normal file
175
pkg/collection/k3s/k3s.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package k3s
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
var _ artifacts.OCICollection = (*k3s)(nil)
|
||||
|
||||
const (
|
||||
releaseUrl = "https://github.com/k3s-io/k3s/releases/download"
|
||||
channelUrl = "https://update.k3s.io/v1-release/channels"
|
||||
bootstrapUrl = "https://get.k3s.io"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrImagesNotFound = errors.New("k3s dependent images not found")
|
||||
ErrFetchingImages = errors.New("failed to fetch k3s dependent images")
|
||||
ErrExecutableNotfound = errors.New("k3s executable not found")
|
||||
ErrChannelNotFound = errors.New("desired k3s channel not found")
|
||||
)
|
||||
|
||||
type k3s struct {
|
||||
version string
|
||||
arch string
|
||||
|
||||
computed bool
|
||||
contents map[string]artifacts.OCI
|
||||
channels map[string]string
|
||||
client *getter.Client
|
||||
}
|
||||
|
||||
func NewK3s(version string) (artifacts.OCICollection, error) {
|
||||
return &k3s{
|
||||
version: version,
|
||||
contents: make(map[string]artifacts.OCI),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (k *k3s) Contents() (map[string]artifacts.OCI, error) {
|
||||
if err := k.compute(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return k.contents, nil
|
||||
}
|
||||
|
||||
func (k *k3s) compute() error {
|
||||
if k.computed {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := k.fetchChannels(); err == nil {
|
||||
if version, ok := k.channels[k.version]; ok {
|
||||
k.version = version
|
||||
}
|
||||
}
|
||||
|
||||
if err := k.images(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := k.executable(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := k.bootstrap(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k.computed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *k3s) executable() error {
|
||||
n := "k3s"
|
||||
if k.arch != "" && k.arch != "amd64" {
|
||||
n = fmt.Sprintf("name-%s", k.arch)
|
||||
}
|
||||
fref := k.releaseUrl(n)
|
||||
|
||||
resp, err := http.Head(fref)
|
||||
if resp.StatusCode != http.StatusOK || err != nil {
|
||||
return ErrExecutableNotfound
|
||||
}
|
||||
|
||||
f := file.NewFile(fref)
|
||||
|
||||
ref := fmt.Sprintf("%s/k3s:%s", reference.DefaultNamespace, k.dnsCompliantVersion())
|
||||
k.contents[ref] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *k3s) bootstrap() error {
|
||||
c := getter.NewClient(getter.ClientOptions{NameOverride: "k3s-init.sh"})
|
||||
f := file.NewFile(bootstrapUrl, file.WithClient(c))
|
||||
|
||||
ref := fmt.Sprintf("%s/k3s-init.sh:%s", reference.DefaultNamespace, reference.DefaultTag)
|
||||
k.contents[ref] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *k3s) images() error {
|
||||
resp, err := http.Get(k.releaseUrl("k3s-images.txt"))
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ErrFetchingImages
|
||||
} else if err != nil {
|
||||
return ErrImagesNotFound
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
for scanner.Scan() {
|
||||
reference := scanner.Text()
|
||||
o, err := image.NewImage(reference)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k.contents[reference] = o
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *k3s) releaseUrl(artifact string) string {
|
||||
u, _ := url.Parse(releaseUrl)
|
||||
complete := []string{u.Path}
|
||||
u.Path = path.Join(append(complete, []string{k.version, artifact}...)...)
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (k *k3s) dnsCompliantVersion() string {
|
||||
return strings.ReplaceAll(k.version, "+", "-")
|
||||
}
|
||||
|
||||
func (k *k3s) fetchChannels() error {
|
||||
resp, err := http.Get(channelUrl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var c channel
|
||||
if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
channels := make(map[string]string)
|
||||
for _, ch := range c.Data {
|
||||
channels[ch.Name] = ch.Latest
|
||||
}
|
||||
|
||||
k.channels = channels
|
||||
return nil
|
||||
}
|
||||
|
||||
type channel struct {
|
||||
Data []channelData `json:"data"`
|
||||
}
|
||||
|
||||
type channelData struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Latest string `json:"latest"`
|
||||
}
|
||||
@@ -1,99 +1,57 @@
|
||||
package consts
|
||||
|
||||
const (
|
||||
// container media types
|
||||
OCIManifestSchema1 = "application/vnd.oci.image.manifest.v1+json"
|
||||
DockerManifestSchema2 = "application/vnd.docker.distribution.manifest.v2+json"
|
||||
DockerManifestListSchema2 = "application/vnd.docker.distribution.manifest.list.v2+json"
|
||||
OCIImageIndexSchema = "application/vnd.oci.image.index.v1+json"
|
||||
DockerConfigJSON = "application/vnd.docker.container.image.v1+json"
|
||||
DockerLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
DockerForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||
DockerUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
|
||||
OCILayer = "application/vnd.oci.image.layer.v1.tar+gzip"
|
||||
OCIArtifact = "application/vnd.oci.empty.v1+json"
|
||||
|
||||
// helm chart media types
|
||||
DockerConfigJSON = "application/vnd.docker.container.image.v1+json"
|
||||
DockerLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
DockerForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||
DockerUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
|
||||
OCILayer = "application/vnd.oci.image.layer.v1.tar+gzip"
|
||||
OCIArtifact = "application/vnd.oci.empty.v1+json"
|
||||
|
||||
// ChartConfigMediaType is the reserved media type for the Helm chart manifest config
|
||||
ChartConfigMediaType = "application/vnd.cncf.helm.config.v1+json"
|
||||
ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
|
||||
ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
|
||||
|
||||
// file media types
|
||||
FileLayerMediaType = "application/vnd.content.hauler.file.layer.v1"
|
||||
// ChartLayerMediaType is the reserved media type for Helm chart package content
|
||||
ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
|
||||
|
||||
// ProvLayerMediaType is the reserved media type for Helm chart provenance files
|
||||
ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
|
||||
|
||||
// FileLayerMediaType is the reserved media type for File content layers
|
||||
FileLayerMediaType = "application/vnd.content.hauler.file.layer.v1"
|
||||
|
||||
// FileLocalConfigMediaType is the reserved media type for File config
|
||||
FileLocalConfigMediaType = "application/vnd.content.hauler.file.local.config.v1+json"
|
||||
FileDirectoryConfigMediaType = "application/vnd.content.hauler.file.directory.config.v1+json"
|
||||
FileHttpConfigMediaType = "application/vnd.content.hauler.file.http.config.v1+json"
|
||||
|
||||
// memory media types
|
||||
// MemoryConfigMediaType is the reserved media type for Memory config for a generic set of bytes stored in memory
|
||||
MemoryConfigMediaType = "application/vnd.content.hauler.memory.config.v1+json"
|
||||
|
||||
// wasm media types
|
||||
// WasmArtifactLayerMediaType is the reserved media type for WASM artifact layers
|
||||
WasmArtifactLayerMediaType = "application/vnd.wasm.content.layer.v1+wasm"
|
||||
WasmConfigMediaType = "application/vnd.wasm.config.v1+json"
|
||||
|
||||
// unknown media types
|
||||
// WasmConfigMediaType is the reserved media type for WASM configs
|
||||
WasmConfigMediaType = "application/vnd.wasm.config.v1+json"
|
||||
|
||||
UnknownManifest = "application/vnd.hauler.cattle.io.unknown.v1+json"
|
||||
UnknownLayer = "application/vnd.content.hauler.unknown.layer"
|
||||
Unknown = "unknown"
|
||||
|
||||
// vendor prefixes
|
||||
OCIVendorPrefix = "vnd.oci"
|
||||
DockerVendorPrefix = "vnd.docker"
|
||||
HaulerVendorPrefix = "vnd.hauler"
|
||||
OCIImageIndexFile = "index.json"
|
||||
|
||||
// annotation keys
|
||||
KindAnnotationName = "kind"
|
||||
KindAnnotationImage = "dev.cosignproject.cosign/image"
|
||||
KindAnnotationIndex = "dev.cosignproject.cosign/imageIndex"
|
||||
KindAnnotationName = "kind"
|
||||
KindAnnotation = "dev.cosignproject.cosign/image"
|
||||
|
||||
CarbideRegistry = "rgcrprod.azurecr.us"
|
||||
ImageAnnotationKey = "hauler.dev/key"
|
||||
ImageAnnotationPlatform = "hauler.dev/platform"
|
||||
ImageAnnotationRegistry = "hauler.dev/registry"
|
||||
ImageAnnotationTlog = "hauler.dev/use-tlog-verify"
|
||||
|
||||
// cosign keyless validation options
|
||||
ImageAnnotationCertIdentity = "hauler.dev/certificate-identity"
|
||||
ImageAnnotationCertIdentityRegexp = "hauler.dev/certificate-identity-regexp"
|
||||
ImageAnnotationCertOidcIssuer = "hauler.dev/certificate-oidc-issuer"
|
||||
ImageAnnotationCertOidcIssuerRegexp = "hauler.dev/certificate-oidc-issuer-regexp"
|
||||
ImageAnnotationCertGithubWorkflowRepository = "hauler.dev/certificate-github-workflow-repository"
|
||||
|
||||
// content kinds
|
||||
ImagesContentKind = "Images"
|
||||
ChartsContentKind = "Charts"
|
||||
FilesContentKind = "Files"
|
||||
DriverContentKind = "Driver"
|
||||
ImageTxtsContentKind = "ImageTxts"
|
||||
ChartsCollectionKind = "ThickCharts"
|
||||
|
||||
// content groups
|
||||
ContentGroup = "content.hauler.cattle.io"
|
||||
CollectionGroup = "collection.hauler.cattle.io"
|
||||
|
||||
// environment variables
|
||||
HaulerDir = "HAULER_DIR"
|
||||
HaulerTempDir = "HAULER_TEMP_DIR"
|
||||
HaulerStoreDir = "HAULER_STORE_DIR"
|
||||
HaulerIgnoreErrors = "HAULER_IGNORE_ERRORS"
|
||||
|
||||
// container files and directories
|
||||
ImageManifestFile = "manifest.json"
|
||||
ImageConfigFile = "config.json"
|
||||
|
||||
// other constraints
|
||||
CarbideRegistry = "rgcrprod.azurecr.us"
|
||||
DefaultNamespace = "hauler"
|
||||
DefaultTag = "latest"
|
||||
DefaultStoreName = "store"
|
||||
DefaultHaulerDirName = ".hauler"
|
||||
DefaultHaulerTempDirName = "hauler"
|
||||
DefaultRegistryRootDir = "registry"
|
||||
DefaultRegistryPort = 5000
|
||||
DefaultFileserverRootDir = "fileserver"
|
||||
DefaultFileserverPort = 8080
|
||||
DefaultFileserverTimeout = 60
|
||||
DefaultHaulerArchiveName = "haul.tar.zst"
|
||||
DefaultHaulerManifestName = "hauler-manifest.yaml"
|
||||
DefaultRetries = 3
|
||||
RetriesInterval = 5
|
||||
CustomTimeFormat = "2006-01-02 15:04:05"
|
||||
)
|
||||
|
||||
@@ -16,16 +16,16 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
"helm.sh/helm/v3/pkg/chart/loader"
|
||||
"helm.sh/helm/v3/pkg/cli"
|
||||
"helm.sh/helm/v3/pkg/registry"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/layer"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/layer"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -40,7 +40,7 @@ type Chart struct {
|
||||
annotations map[string]string
|
||||
}
|
||||
|
||||
// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on chart contents
|
||||
// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on v1alpha1.Chart contents
|
||||
func NewChart(name string, opts *action.ChartPathOptions) (*Chart, error) {
|
||||
chartRef := name
|
||||
actionConfig := new(action.Configuration)
|
||||
|
||||
@@ -9,16 +9,16 @@ import (
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/content/chart"
|
||||
)
|
||||
|
||||
func TestNewChart(t *testing.T) {
|
||||
tempDir, err := os.MkdirTemp("", "hauler")
|
||||
tmpdir, err := os.MkdirTemp("", "hauler")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
type args struct {
|
||||
name string
|
||||
@@ -33,18 +33,18 @@ func TestNewChart(t *testing.T) {
|
||||
{
|
||||
name: "should create from a chart archive",
|
||||
args: args{
|
||||
name: "rancher-cluster-templates-0.5.2.tgz",
|
||||
name: "rancher-cluster-templates-0.4.4.tgz",
|
||||
opts: &action.ChartPathOptions{RepoURL: "../../../testdata"},
|
||||
},
|
||||
want: v1.Descriptor{
|
||||
MediaType: consts.ChartLayerMediaType,
|
||||
Size: 14970,
|
||||
Size: 13102,
|
||||
Digest: v1.Hash{
|
||||
Algorithm: "sha256",
|
||||
Hex: "0905de044a6e57cf3cd27bfc8482753049920050b10347ae2315599bd982a0e3",
|
||||
Hex: "4b3bb4e474b54bf9057b298f8f11c239bb561396716d8cd5fc369c407fba2965",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "rancher-cluster-templates-0.5.2.tgz",
|
||||
ocispec.AnnotationTitle: "rancher-cluster-templates-0.4.4.tgz",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
@@ -53,7 +53,7 @@ func TestNewChart(t *testing.T) {
|
||||
// {
|
||||
// name: "should create from a chart directory",
|
||||
// args: args{
|
||||
// path: filepath.Join(tempDir, "podinfo"),
|
||||
// path: filepath.Join(tmpdir, "podinfo"),
|
||||
// },
|
||||
// want: want,
|
||||
// wantErr: false,
|
||||
@@ -63,17 +63,17 @@ func TestNewChart(t *testing.T) {
|
||||
name: "should fetch a remote chart",
|
||||
args: args{
|
||||
name: "cert-manager",
|
||||
opts: &action.ChartPathOptions{RepoURL: "https://charts.jetstack.io", Version: "1.15.3"},
|
||||
opts: &action.ChartPathOptions{RepoURL: "https://charts.jetstack.io", Version: "1.14.4"},
|
||||
},
|
||||
want: v1.Descriptor{
|
||||
MediaType: consts.ChartLayerMediaType,
|
||||
Size: 94751,
|
||||
Size: 80674,
|
||||
Digest: v1.Hash{
|
||||
Algorithm: "sha256",
|
||||
Hex: "016e68d9f7083d2c4fd302f951ee6490dbf4cb1ef44cfc06914c39cbfb01d858",
|
||||
Hex: "5775fdbc1881d6e510df76d38753af54b86bd14caa8edb28fdbb79527042dede",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "cert-manager-v1.15.3.tgz",
|
||||
ocispec.AnnotationTitle: "cert-manager-v1.14.4.tgz",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
|
||||
@@ -7,31 +7,17 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/yaml"

	v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
	v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
	"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)

func Load(data []byte) (schema.ObjectKind, error) {
	var tm metav1.TypeMeta
	if err := yaml.Unmarshal(data, &tm); err != nil {
		return nil, fmt.Errorf("failed to parse manifest: %w", err)
		return nil, err
	}

	if tm.APIVersion == "" {
		return nil, fmt.Errorf("missing required manifest field [apiVersion]")
	}

	if tm.Kind == "" {
		return nil, fmt.Errorf("missing required manifest field [kind]")
	}

	gv := tm.GroupVersionKind().GroupVersion()
	// allow v1 and v1alpha1 content/collection
	if gv != v1.ContentGroupVersion &&
		gv != v1.CollectionGroupVersion &&
		gv != v1alpha1.ContentGroupVersion &&
		gv != v1alpha1.CollectionGroupVersion {
		return nil, fmt.Errorf("unrecognized content/collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind)
	if tm.GroupVersionKind().GroupVersion() != v1alpha1.ContentGroupVersion && tm.GroupVersionKind().GroupVersion() != v1alpha1.CollectionGroupVersion {
		return nil, fmt.Errorf("unrecognized content/collection type: %s", tm.GroupVersionKind().String())
	}

	return &tm, nil
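// Illustrative sketch (not part of the diff): how a caller typically uses a
// Load-style helper -- parse only TypeMeta from the manifest document, then
// branch on the reported kind. This standalone version inlines the TypeMeta
// parse instead of importing the hauler content package; the manifest text is
// an assumed example.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	doc := []byte(`apiVersion: content.hauler.cattle.io/v1alpha1
kind: Images
spec:
  images:
    - name: busybox:1.36
`)

	var tm metav1.TypeMeta
	if err := yaml.Unmarshal(doc, &tm); err != nil {
		panic(err)
	}

	switch tm.Kind {
	case "Images", "Charts", "Files":
		fmt.Println("content:", tm.GroupVersionKind())
	default:
		fmt.Println("unrecognized kind:", tm.Kind)
	}
}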
@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/google/go-containerregistry/pkg/name"
	"io"
	"os"
	"path/filepath"
@@ -11,8 +12,6 @@ import (
	"strings"
	"sync"

	"github.com/google/go-containerregistry/pkg/name"

	ccontent "github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes"
	"github.com/opencontainers/image-spec/specs-go"
@@ -20,8 +19,8 @@ import (
	"oras.land/oras-go/pkg/content"
	"oras.land/oras-go/pkg/target"

	"hauler.dev/go/hauler/pkg/consts"
	"hauler.dev/go/hauler/pkg/reference"
	"github.com/rancherfederal/hauler/pkg/consts"
	"github.com/rancherfederal/hauler/pkg/reference"
)

var _ target.Target = (*OCI)(nil)
@@ -66,7 +65,7 @@ func (o *OCI) AddIndex(desc ocispec.Descriptor) error {

// LoadIndex will load the index from disk
func (o *OCI) LoadIndex() error {
	path := o.path(ocispec.ImageIndexFile)
	path := o.path(consts.OCIImageIndexFile)
	idx, err := os.Open(path)
	if err != nil {
		if !os.IsNotExist(err) {
@@ -125,9 +124,9 @@ func (o *OCI) SaveIndex() error {
		kindJ := descs[j].Annotations["kind"]

		// Objects with the prefix of "dev.cosignproject.cosign/image" should be at the top.
		if strings.HasPrefix(kindI, consts.KindAnnotationImage) && !strings.HasPrefix(kindJ, consts.KindAnnotationImage) {
		if strings.HasPrefix(kindI, consts.KindAnnotation) && !strings.HasPrefix(kindJ, consts.KindAnnotation) {
			return true
		} else if !strings.HasPrefix(kindI, consts.KindAnnotationImage) && strings.HasPrefix(kindJ, consts.KindAnnotationImage) {
		} else if !strings.HasPrefix(kindI, consts.KindAnnotation) && strings.HasPrefix(kindJ, consts.KindAnnotation) {
			return false
		}
		return false // Default: maintain the order.
@@ -138,7 +137,7 @@ func (o *OCI) SaveIndex() error {
	if err != nil {
		return err
	}
	return os.WriteFile(o.path(ocispec.ImageIndexFile), data, 0644)
	return os.WriteFile(o.path(consts.OCIImageIndexFile), data, 0644)
}

// Resolve attempts to resolve the reference into a name and descriptor.
@@ -259,7 +258,7 @@ func (o *OCI) blobWriterAt(desc ocispec.Descriptor) (*os.File, error) {
}

func (o *OCI) ensureBlob(alg string, hex string) (string, error) {
	dir := o.path(ocispec.ImageBlobsDir, alg)
	dir := o.path("blobs", alg)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
		return "", err
	}
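// Illustrative sketch (not part of the diff): the descriptor ordering that
// SaveIndex above implements -- index entries whose "kind" annotation starts
// with the cosign image prefix are moved to the front, everything else keeps
// its relative order via sort.SliceStable. The sample annotations are assumed.
package main

import (
	"fmt"
	"sort"
	"strings"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

const kindImagePrefix = "dev.cosignproject.cosign/image"

func main() {
	descs := []ocispec.Descriptor{
		{Annotations: map[string]string{"kind": "dev.cosignproject.cosign/sigs"}},
		{Annotations: map[string]string{"kind": "dev.cosignproject.cosign/imageIndex"}},
		{Annotations: map[string]string{"kind": "dev.cosignproject.cosign/image"}},
	}

	sort.SliceStable(descs, func(i, j int) bool {
		kindI := descs[i].Annotations["kind"]
		kindJ := descs[j].Annotations["kind"]
		if strings.HasPrefix(kindI, kindImagePrefix) && !strings.HasPrefix(kindJ, kindImagePrefix) {
			return true
		}
		return false // otherwise keep the existing order
	})

	// Prints imageIndex, image, then sigs: both image-prefixed kinds float up,
	// preserving their original relative order.
	for _, d := range descs {
		fmt.Println(d.Annotations["kind"])
	}
}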
@@ -1,101 +1,55 @@
package cosign

import (
"bufio"
"context"
"embed"
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"

"github.com/sigstore/cosign/v2/cmd/cosign/cli"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/verify"
"hauler.dev/go/hauler/internal/flags"
"hauler.dev/go/hauler/pkg/artifacts/image"
"hauler.dev/go/hauler/pkg/consts"
"hauler.dev/go/hauler/pkg/log"
"hauler.dev/go/hauler/pkg/store"
"oras.land/oras-go/pkg/content"

"github.com/rancherfederal/hauler/pkg/artifacts/image"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)

// VerifySignature verifies the digital signature of a file using Sigstore/Cosign.
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
l := log.FromContext(ctx)
const maxRetries = 3
const retryDelay = time.Second * 5

// VerifyFileSignature verifies the digital signature of a file using Sigstore/Cosign.
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, ref string) error {
operation := func() error {
v := &verify.VerifyCommand{
KeyRef: keyPath,
IgnoreTlog: true, // Ignore transparency log by default.
}

// if the user wants to use the transparency log, set the flag to false
if useTlog {
v.IgnoreTlog = false
}

err := log.CaptureOutput(l, true, func() error {
return v.Exec(ctx, []string{ref})
})
cosignBinaryPath, err := getCosignPath()
if err != nil {
return err
}

cmd := exec.Command(cosignBinaryPath, "verify", "--insecure-ignore-tlog", "--key", keyPath, ref)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("error verifying signature: %v, output: %s", err, output)
}

return nil
}

return RetryOperation(ctx, rso, ro, operation)
}

// VerifyKeylessSignature verifies the digital signature of a file using Sigstore/Cosign.
func VerifyKeylessSignature(ctx context.Context, s *store.Layout, identity string, identityRegexp string, oidcIssuer string, oidcIssuerRegexp string, ghWorkflowRepository string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
l := log.FromContext(ctx)
operation := func() error {

certVerifyOptions := options.CertVerifyOptions{
CertOidcIssuer: oidcIssuer,
CertOidcIssuerRegexp: oidcIssuer,
CertIdentity: identity,
CertIdentityRegexp: identityRegexp,
CertGithubWorkflowRepository: ghWorkflowRepository,
}

v := &verify.VerifyCommand{
CertVerifyOptions: certVerifyOptions,
IgnoreTlog: false, // Ignore transparency log is set to false by default for keyless signature verification
CertGithubWorkflowRepository: ghWorkflowRepository,
}

// if the user wants to use the transparency log, set the flag to false
if useTlog {
v.IgnoreTlog = false
}

err := log.CaptureOutput(l, true, func() error {
return v.Exec(ctx, []string{ref})
})
if err != nil {
return err
}

return nil
}

return RetryOperation(ctx, rso, ro, operation)
return RetryOperation(ctx, operation)
}

// SaveImage saves image and any signatures/attestations to the store.
func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string) error {
l := log.FromContext(ctx)

if !ro.IgnoreErrors {
envVar := os.Getenv(consts.HaulerIgnoreErrors)
if envVar == "true" {
ro.IgnoreErrors = true
}
}

operation := func() error {
o := &options.SaveOptions{
Directory: s.Root,
cosignBinaryPath, err := getCosignPath()
if err != nil {
return err
}

// check to see if the image is multi-arch
@@ -103,59 +57,114 @@ func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string
if err != nil {
return err
}
l.Debugf("multi-arch image [%v]", isMultiArch)
l.Debugf("multi-arch image: %v", isMultiArch)

cmd := exec.Command(cosignBinaryPath, "save", ref, "--dir", s.Root)
// Conditionally add platform.
if platform != "" && isMultiArch {
l.Debugf("platform for image [%s]", platform)
o.Platform = platform
cmd.Args = append(cmd.Args, "--platform", platform)
}

err = cli.SaveCmd(ctx, *o, ref)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
// start the command after having set up the pipe
if err := cmd.Start(); err != nil {
return err
}

// read command's stdout line by line
output := bufio.NewScanner(stdout)
for output.Scan() {
l.Debugf(output.Text()) // write each line to your log, or anything you need
}
if err := output.Err(); err != nil {
cmd.Wait()
return err
}

// read command's stderr line by line
errors := bufio.NewScanner(stderr)
for errors.Scan() {
l.Warnf(errors.Text()) // write each line to your log, or anything you need
}
if err := errors.Err(); err != nil {
cmd.Wait()
return err
}

// Wait for the command to finish
err = cmd.Wait()
if err != nil {
return err
}

return nil

}

return RetryOperation(ctx, rso, ro, operation)
return RetryOperation(ctx, operation)
}

// LoadImage loads store to a remote registry.
func LoadImages(ctx context.Context, s *store.Layout, registry string, only string, ropts content.RegistryOptions, ro *flags.CliRootOpts) error {
func LoadImages(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions) error {
l := log.FromContext(ctx)

o := &options.LoadOptions{
Directory: s.Root,
Registry: options.RegistryOptions{
Name: registry,
},
cosignBinaryPath, err := getCosignPath()
if err != nil {
return err
}

// Conditionally add extra flags.
if len(only) > 0 {
o.LoadOnly = only
}
cmd := exec.Command(cosignBinaryPath, "load", "--registry", registry, "--dir", s.Root)

// Conditionally add extra registry flags.
if ropts.Insecure {
o.Registry.AllowInsecure = true
cmd.Args = append(cmd.Args, "--allow-insecure-registry=true")
}

if ropts.PlainHTTP {
o.Registry.AllowHTTPRegistry = true
cmd.Args = append(cmd.Args, "--allow-http-registry=true")
}

if ropts.Username != "" {
o.Registry.AuthConfig.Username = ropts.Username
o.Registry.AuthConfig.Password = ropts.Password
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
// start the command after having set up the pipe
if err := cmd.Start(); err != nil {
return err
}

// execute the cosign load and capture the output in our logger
err := log.CaptureOutput(l, false, func() error {
return cli.LoadCmd(ctx, *o, "")
})
// read command's stdout line by line
output := bufio.NewScanner(stdout)
for output.Scan() {
l.Infof(output.Text()) // write each line to your log, or anything you need
}
if err := output.Err(); err != nil {
cmd.Wait()
return err
}

// read command's stderr line by line
errors := bufio.NewScanner(stderr)
for errors.Scan() {
l.Errorf(errors.Text()) // write each line to your log, or anything you need
}
if err := errors.Err(); err != nil {
cmd.Wait()
return err
}

// Wait for the command to finish
err = cmd.Wait()
if err != nil {
return err
}
@@ -163,49 +172,109 @@ func LoadImages(ctx context.Context, s *store.Layout, registry string, only stri
return nil
}

func RetryOperation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, operation func() error) error {
// RegistryLogin - performs cosign login
func RegistryLogin(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions) error {
log := log.FromContext(ctx)
cosignBinaryPath, err := getCosignPath()
if err != nil {
return err
}

cmd := exec.Command(cosignBinaryPath, "login", registry, "-u", ropts.Username, "-p", ropts.Password)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("error logging into registry: %v, output: %s", err, output)
}
log.Infof(strings.Trim(string(output), "\n"))

return nil
}

func RetryOperation(ctx context.Context, operation func() error) error {
l := log.FromContext(ctx)

if !ro.IgnoreErrors {
envVar := os.Getenv(consts.HaulerIgnoreErrors)
if envVar == "true" {
ro.IgnoreErrors = true
}
}

// Validate retries and fall back to a default
retries := rso.Retries
if retries <= 0 {
retries = consts.DefaultRetries
}

for attempt := 1; attempt <= rso.Retries; attempt++ {
for attempt := 1; attempt <= maxRetries; attempt++ {
err := operation()
if err == nil {
// If the operation succeeds, return nil (no error)
// If the operation succeeds, return nil (no error).
return nil
}

if ro.IgnoreErrors {
if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") {
l.Warnf("warning (attempt %d/%d)... failed tlog verification", attempt, rso.Retries)
} else {
l.Warnf("warning (attempt %d/%d)... %v", attempt, rso.Retries, err)
}
} else {
if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") {
l.Errorf("error (attempt %d/%d)... failed tlog verification", attempt, rso.Retries)
} else {
l.Errorf("error (attempt %d/%d)... %v", attempt, rso.Retries, err)
}
}
// Log the error for the current attempt.
l.Warnf("error (attempt %d/%d): %v", attempt, maxRetries, err)

// If this is not the last attempt, wait before retrying
if attempt < rso.Retries {
time.Sleep(time.Second * consts.RetriesInterval)
// If this is not the last attempt, wait before retrying.
if attempt < maxRetries {
time.Sleep(retryDelay)
}
}

// If all attempts fail, return an error
return fmt.Errorf("operation unsuccessful after %d attempts", rso.Retries)
// If all attempts fail, return an error.
return fmt.Errorf("operation failed after %d attempts", maxRetries)
}
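Both variants of RetryOperation follow the same shape: run the closure, log each failed attempt, sleep between attempts, and give up after the configured count. A stripped-down sketch of that loop, with the retry count and delay as plain parameters rather than the flags and consts used above:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op up to attempts times, waiting delay between failures.
// It is a simplified stand-in for the RetryOperation shown in the diff.
func retry(attempts int, delay time.Duration, op func() error) error {
	for attempt := 1; attempt <= attempts; attempt++ {
		err := op()
		if err == nil {
			return nil
		}
		fmt.Printf("error (attempt %d/%d): %v\n", attempt, attempts, err)
		if attempt < attempts {
			time.Sleep(delay)
		}
	}
	return fmt.Errorf("operation failed after %d attempts", attempts)
}

func main() {
	calls := 0
	err := retry(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(err) // <nil> once the third attempt succeeds
}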

func EnsureBinaryExists(ctx context.Context, bin embed.FS) error {
// Set up a path for the binary to be copied.
binaryPath, err := getCosignPath()
if err != nil {
return fmt.Errorf("error: %v", err)
}

// Determine the architecture so that we pull the correct embedded binary.
arch := runtime.GOARCH
rOS := runtime.GOOS
binaryName := "cosign"
if rOS == "windows" {
binaryName = fmt.Sprintf("cosign-%s-%s.exe", rOS, arch)
} else {
binaryName = fmt.Sprintf("cosign-%s-%s", rOS, arch)
}

// retrieve the embedded binary
f, err := bin.ReadFile(fmt.Sprintf("binaries/%s", binaryName))
if err != nil {
return fmt.Errorf("error: %v", err)
}

// write the binary to the filesystem
err = os.WriteFile(binaryPath, f, 0755)
if err != nil {
return fmt.Errorf("error: %v", err)
}

return nil
}

// getCosignPath returns the binary path
func getCosignPath() (string, error) {
// Get the current user's information
currentUser, err := user.Current()
if err != nil {
return "", fmt.Errorf("error: %v", err)
}

// Get the user's home directory
homeDir := currentUser.HomeDir

// Construct the path to the .hauler directory
haulerDir := filepath.Join(homeDir, ".hauler")

// Create the .hauler directory if it doesn't exist
if _, err := os.Stat(haulerDir); os.IsNotExist(err) {
// .hauler directory does not exist, create it
if err := os.MkdirAll(haulerDir, 0755); err != nil {
return "", fmt.Errorf("error creating .hauler directory: %v", err)
}
}

// Determine the binary name.
rOS := runtime.GOOS
binaryName := "cosign"
if rOS == "windows" {
binaryName = "cosign.exe"
}

// construct path to binary
binaryPath := filepath.Join(haulerDir, binaryName)

return binaryPath, nil
}
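The helper above resolves a per-user directory under the home directory, creates it if needed, and returns the platform-specific binary path. A compact sketch of the same idea using os.UserHomeDir instead of os/user (illustrative only, not the project's code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// binaryPath returns $HOME/.hauler/cosign (cosign.exe on Windows),
// creating the .hauler directory if it does not already exist.
func binaryPath() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	dir := filepath.Join(home, ".hauler")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", err
	}
	name := "cosign"
	if runtime.GOOS == "windows" {
		name = "cosign.exe"
	}
	return filepath.Join(dir, name), nil
}

func main() {
	p, err := binaryPath()
	fmt.Println(p, err)
}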

@@ -7,7 +7,7 @@ import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"

"hauler.dev/go/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts"
)

/*

@@ -6,7 +6,7 @@ import (
v1 "github.com/google/go-containerregistry/pkg/v1"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"

"hauler.dev/go/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/consts"
)

type Opener func() (io.ReadCloser, error)

@@ -7,8 +7,6 @@ import (

"github.com/rs/zerolog"
"github.com/rs/zerolog/log"

"hauler.dev/go/hauler/pkg/consts"
)

// Logger provides an interface for all used logger features regardless of logging backend
@@ -32,8 +30,9 @@ type Fields map[string]string

// NewLogger returns a new Logger
func NewLogger(out io.Writer) Logger {
zerolog.TimeFieldFormat = consts.CustomTimeFormat
output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: consts.CustomTimeFormat}
customTimeFormat := "2006-01-02 15:04:05"
zerolog.TimeFieldFormat = customTimeFormat
output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: customTimeFormat}
l := log.Output(output)
return &logger{
zl: l.With().Timestamp().Logger(),

@@ -1,99 +0,0 @@
package log

import (
"bufio"
"fmt"
"io"
"os"
"strings"
"sync"
)

// CustomWriter forwards log messages to the application's logger
type CustomWriter struct {
logger Logger
level string
}

func (cw *CustomWriter) Write(p []byte) (n int, err error) {
message := strings.TrimSpace(string(p))
if message != "" {
if cw.level == "error" {
cw.logger.Errorf("%s", message)
} else if cw.level == "info" {
cw.logger.Infof("%s", message)
} else {
cw.logger.Debugf("%s", message)
}
}
return len(p), nil
}

// logStream reads lines from a reader and writes them to the provided writer
func logStream(reader io.Reader, customWriter *CustomWriter, wg *sync.WaitGroup) {
defer wg.Done()

scanner := bufio.NewScanner(reader)
for scanner.Scan() {
customWriter.Write([]byte(scanner.Text()))
}
if err := scanner.Err(); err != nil && err != io.EOF {
customWriter.logger.Errorf("error reading log stream: %v", err)
}
}

// CaptureOutput redirects stdout and stderr to custom loggers and executes the provided function
func CaptureOutput(logger Logger, debug bool, fn func() error) error {
// Create pipes for capturing stdout and stderr
stdoutReader, stdoutWriter, err := os.Pipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe: %w", err)
}
stderrReader, stderrWriter, err := os.Pipe()
if err != nil {
return fmt.Errorf("failed to create stderr pipe: %w", err)
}

// Save original stdout and stderr
origStdout := os.Stdout
origStderr := os.Stderr

// Redirect stdout and stderr
os.Stdout = stdoutWriter
os.Stderr = stderrWriter

// Use WaitGroup to wait for logging goroutines to finish
var wg sync.WaitGroup
wg.Add(2)

// Start logging goroutines
if !debug {
go logStream(stdoutReader, &CustomWriter{logger: logger, level: "info"}, &wg)
go logStream(stderrReader, &CustomWriter{logger: logger, level: "error"}, &wg)
} else {
go logStream(stdoutReader, &CustomWriter{logger: logger, level: "debug"}, &wg)
go logStream(stderrReader, &CustomWriter{logger: logger, level: "debug"}, &wg)
}

// Run the provided function in a separate goroutine
fnErr := make(chan error, 1)
go func() {
fnErr <- fn()
stdoutWriter.Close() // Close writers to signal EOF to readers
stderrWriter.Close()
}()

// Wait for logging goroutines to finish
wg.Wait()

// Restore original stdout and stderr
os.Stdout = origStdout
os.Stderr = origStderr

// Check for errors from the function
if err := <-fnErr; err != nil {
return fmt.Errorf("function execution failed: %w", err)
}

return nil
}
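The deleted CaptureOutput helper works by swapping os.Stdout and os.Stderr for pipe writers, streaming each line into the logger from goroutines, and restoring the originals once the wrapped function returns. A minimal sketch of the same redirection trick for stdout only (illustrative; it collects lines into a slice instead of the logger and omits the stderr and level plumbing above):

package main

import (
	"bufio"
	"fmt"
	"os"
	"sync"
)

// captureStdout redirects os.Stdout while fn runs and returns the captured lines.
func captureStdout(fn func()) ([]string, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	orig := os.Stdout
	os.Stdout = w

	var (
		lines []string
		wg    sync.WaitGroup
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			lines = append(lines, scanner.Text())
		}
	}()

	fn()
	w.Close() // signal EOF to the reader goroutine
	wg.Wait()
	os.Stdout = orig
	return lines, nil
}

func main() {
	lines, _ := captureStdout(func() { fmt.Println("hello from the captured writer") })
	fmt.Fprintln(os.Stderr, lines)
}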

@@ -8,8 +8,11 @@ import (
"strings"

gname "github.com/google/go-containerregistry/pkg/name"
)

"hauler.dev/go/hauler/pkg/consts"
const (
DefaultNamespace = "hauler"
DefaultTag = "latest"
)

type Reference interface {
@@ -33,14 +36,14 @@ func NewTagged(n string, tag string) (gname.Reference, error) {

// Parse will parse a reference and return a name.Reference namespaced with DefaultNamespace if necessary
func Parse(ref string) (gname.Reference, error) {
r, err := gname.ParseReference(ref, gname.WithDefaultRegistry(""), gname.WithDefaultTag(consts.DefaultTag))
r, err := gname.ParseReference(ref, gname.WithDefaultRegistry(""), gname.WithDefaultTag(DefaultTag))
if err != nil {
return nil, err
}

if !strings.ContainsRune(r.String(), '/') {
ref = consts.DefaultNamespace + "/" + r.String()
return gname.ParseReference(ref, gname.WithDefaultRegistry(""), gname.WithDefaultTag(consts.DefaultTag))
ref = DefaultNamespace + "/" + r.String()
return gname.ParseReference(ref, gname.WithDefaultRegistry(""), gname.WithDefaultTag(DefaultTag))
}

return r, nil
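Parse above defaults a bare reference into the hauler namespace and the latest tag. A small sketch of that behavior using go-containerregistry's name package directly, with literal defaults standing in for the consts package (not the project's code):

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
)

const (
	defaultNamespace = "hauler"
	defaultTag       = "latest"
)

// parse namespaces a bare reference such as "busybox" into "hauler/busybox:latest".
func parse(ref string) (name.Reference, error) {
	r, err := name.ParseReference(ref, name.WithDefaultRegistry(""), name.WithDefaultTag(defaultTag))
	if err != nil {
		return nil, err
	}
	if !strings.ContainsRune(r.String(), '/') {
		return name.ParseReference(defaultNamespace+"/"+r.String(),
			name.WithDefaultRegistry(""), name.WithDefaultTag(defaultTag))
	}
	return r, nil
}

func main() {
	r, err := parse("busybox")
	fmt.Println(r, err) // hauler/busybox:latest <nil>
}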

@@ -4,7 +4,7 @@ import (
"reflect"
"testing"

"hauler.dev/go/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/reference"
)

func TestParse(t *testing.T) {

@@ -15,10 +15,10 @@ import (
"oras.land/oras-go/pkg/oras"
"oras.land/oras-go/pkg/target"

"hauler.dev/go/hauler/pkg/artifacts"
"hauler.dev/go/hauler/pkg/consts"
"hauler.dev/go/hauler/pkg/content"
"hauler.dev/go/hauler/pkg/layer"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/content"
"github.com/rancherfederal/hauler/pkg/layer"
)

type Layout struct {
@@ -118,7 +118,7 @@ func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (oci
Digest: digest.FromBytes(mdata),
Size: int64(len(mdata)),
Annotations: map[string]string{
consts.KindAnnotationName: consts.KindAnnotationImage,
consts.KindAnnotationName: consts.KindAnnotation,
ocispec.AnnotationRefName: ref,
},
URLs: nil,
@@ -151,17 +151,17 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC
// This can be a highly destructive operation if the store's directory happens to be inline with other non-store contents
// To reduce the blast radius and likelihood of deleting things we don't own, Flush explicitly deletes oci-layout content only
func (l *Layout) Flush(ctx context.Context) error {
blobs := filepath.Join(l.Root, ocispec.ImageBlobsDir)
blobs := filepath.Join(l.Root, "blobs")
if err := os.RemoveAll(blobs); err != nil {
return err
}

index := filepath.Join(l.Root, ocispec.ImageIndexFile)
index := filepath.Join(l.Root, "index.json")
if err := os.RemoveAll(index); err != nil {
return err
}

layout := filepath.Join(l.Root, ocispec.ImageLayoutFile)
layout := filepath.Join(l.Root, "oci-layout")
if err := os.RemoveAll(layout); err != nil {
return err
}
@@ -240,7 +240,7 @@ func (l *Layout) writeLayer(layer v1.Layer) error {
return err
}

dir := filepath.Join(l.Root, ocispec.ImageBlobsDir, d.Algorithm)
dir := filepath.Join(l.Root, "blobs", d.Algorithm)
if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
return err
}

@@ -8,8 +8,8 @@ import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/random"

"hauler.dev/go/hauler/pkg/artifacts"
"hauler.dev/go/hauler/pkg/store"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/store"
)

var (
@@ -63,16 +63,16 @@ func TestLayout_AddOCI(t *testing.T) {
}

func setup(t *testing.T) func() error {
tempDir, err := os.MkdirTemp("", "hauler")
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
t.Fatal(err)
}
root = tempDir
root = tmpdir

ctx = context.Background()

return func() error {
os.RemoveAll(tempDir)
os.RemoveAll(tmpdir)
return nil
}
}

@@ -1 +0,0 @@
hauler.dev
@@ -1,9 +0,0 @@
<html>
<head>
<meta name="go-import" content="hauler.dev/go/hauler git https://github.com/hauler-dev/hauler">
<meta http-equiv="refresh" content="0;URL='https://github.com/hauler-dev/hauler'">
</head>
<body>
Redirecting to the <a href="https://github.com/hauler-dev/hauler">hauler source</a>.
</body>
</html>
@@ -1,9 +0,0 @@
<html>
<head>
<meta name="go-import" content="hauler.dev/go/hauler git https://github.com/hauler-dev/hauler">
<meta http-equiv="refresh" content="0;URL='https://docs.hauler.dev/docs/intro'">
</head>
<body>
Redirecting you to the <a href="https://docs.hauler.dev/docs/intro">hauler docs</a>
</body>
</html>
179 testdata/certificate-script.sh vendored
@@ -1,179 +0,0 @@
#!/bin/bash

set -e

# setup directories
mkdir -p testdata/certs
cd testdata/certs

echo "<!-----------------------------------!>"
echo "<! Certificate Authority Certificate !>"
echo "<!-----------------------------------!>"

echo "Generating certificate authority private key..."
openssl genrsa -out root-ca.key 4096

echo "Generating certificate authority configuration file..."
cat <<EOF > root-ca.cnf
[ req ]
default_bits = 4096
default_keyfile = root-ca.key
distinguished_name = req_distinguished_name
req_extensions = v3_ca
prompt = no

[ req_distinguished_name ]
C = US
ST = VIRGINIA
L = RESTON
O = HAULER
OU = HAULER DEV
CN = CERTIFICATE AUTHORITY CERTIFICATE

[v3_ca]
keyUsage = critical, keyCertSign, cRLSign
extendedKeyUsage = anyExtendedKeyUsage
basicConstraints = critical, CA:TRUE
EOF

echo "Generating certificate authority certificate signing request..."
openssl req -new -sha256 -key root-ca.key -out root-ca.csr -config root-ca.cnf

echo "Generating certificate authority certificate..."
openssl x509 -req -in root-ca.csr -signkey root-ca.key -days 3650 -out root-ca.crt -extensions v3_ca -extfile root-ca.cnf

echo "Inspecting certificate authority certificate..."
openssl x509 -text -noout -in root-ca.crt > ca.txt

echo "<!------------------------------------------------!>"
echo "<! Intermediary Certificate Authority Certificate !>"
echo "<!------------------------------------------------!>"

echo "Generating intermediary certificate authority private key..."
openssl genrsa -out intermediary-ca.key 4096

echo "Generating intermediary certificate authority configuration file..."
cat <<EOF > intermediary-ca.cnf
[ req ]
default_bits = 4096
default_keyfile = intermediary-ca.key
distinguished_name = req_distinguished_name
req_extensions = v3_ca
prompt = no

[ req_distinguished_name ]
C = US
ST = VIRGINIA
L = RESTON
O = HAULER
OU = HAULER DEV
CN = INTERMEDIARY CERTIFICATE AUTHORITY CERTIFICATE

[v3_ca]
keyUsage = critical, keyCertSign, cRLSign
extendedKeyUsage = anyExtendedKeyUsage
basicConstraints = critical, CA:TRUE
EOF

echo "Generating intermediary certificate authority certificate signing request..."
openssl req -new -sha256 -key intermediary-ca.key -out intermediary-ca.csr -config intermediary-ca.cnf

echo "Generating intermediary certificate authority certificate..."
openssl x509 -req -in intermediary-ca.csr -CA root-ca.crt -CAkey root-ca.key -CAcreateserial -out intermediary-ca.crt -days 3650 -sha256 -extfile intermediary-ca.cnf -extensions v3_ca

echo "Inspecting intermediary certificate authority certificate..."
openssl x509 -text -noout -in intermediary-ca.crt > intermediary-ca.txt

echo "Verifying intermediary certificate authority certificate..."
openssl verify -CAfile root-ca.crt intermediary-ca.crt

echo "Generating full certificate chain..."
cat intermediary-ca.crt root-ca.crt > cacerts.pem

echo "<!-----------------------------------------------------------------!>"
echo "<! Server Certificate Signed by Intermediary Certificate Authority !>"
echo "<!-----------------------------------------------------------------!>"

echo "Generating server private key..."
openssl genrsa -out server-cert.key 4096

echo "Generating server certificate signing config file..."
cat <<EOF > server-cert.cnf
[ req ]
default_bits = 4096
default_keyfile = server-cert.key
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no

[ req_distinguished_name ]
C = US
ST = VIRGINIA
L = RESTON
O = HAULER
OU = HAULER DEV
CN = SERVER CERTIFICATE

[v3_req]
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names

[ alt_names ]
DNS.1 = localhost
DNS.2 = registry.localhost
DNS.3 = fileserver.localhost
EOF

echo "Generating server certificate signing request..."
openssl req -new -sha256 -key server-cert.key -out server-cert.csr -config server-cert.cnf

echo "Generating server certificate..."
openssl x509 -req -in server-cert.csr -CA intermediary-ca.crt -CAkey intermediary-ca.key -CAcreateserial -out server-cert.crt -days 3650 -sha256 -extfile server-cert.cnf -extensions v3_req

echo "Inspecting server certificate..."
openssl x509 -text -noout -in server-cert.crt > server-cert.txt

echo "Verifying server certificate..."
openssl verify -CAfile cacerts.pem server-cert.crt

echo "<!-----------------------------------------------------------------!>"
echo "<! Client Certificate Signed by Intermediary Certificate Authority !>"
echo "<!-----------------------------------------------------------------!>"

echo "Generating client private key..."
openssl genrsa -out client-cert.key 4096

echo "Generating client certificate signing config file..."
cat <<EOF > client-cert.cnf
[ req ]
default_bits = 4096
default_keyfile = client-cert.key
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no

[ req_distinguished_name ]
C = US
ST = VIRGINIA
L = RESTON
O = HAULER
OU = HAULER DEV
CN = CLIENT CERTIFICATE

[ v3_req ]
keyUsage = digitalSignature
extendedKeyUsage = clientAuth
EOF

echo "Generating client certificate signing request..."
openssl req -new -sha256 -key client-cert.key -out client-cert.csr -config client-cert.cnf

echo "Generating client certificate..."
openssl x509 -req -in client-cert.csr -CA intermediary-ca.crt -CAkey intermediary-ca.key -CAcreateserial -out client-cert.crt -days 3650 -sha256 -extfile client-cert.cnf -extensions v3_req

echo "Inspecting client certificate..."
openssl x509 -text -noout -in client-cert.crt > client-cert.txt

echo "Verifying client certificate..."
openssl verify -CAfile cacerts.pem client-cert.crt
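The removed script finishes by checking the generated server and client certificates against the bundled chain with openssl verify. The same check can be done programmatically; a rough Go sketch that verifies server-cert.crt against cacerts.pem (file names and the registry.localhost SAN are taken from the script, everything else is illustrative and not part of the repository):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// Load the CA bundle produced by the script (intermediary + root).
	caPEM, err := os.ReadFile("testdata/certs/cacerts.pem")
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caPEM) {
		panic("no certificates found in cacerts.pem")
	}

	// Load and parse the server certificate.
	certPEM, err := os.ReadFile("testdata/certs/server-cert.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(certPEM)
	if block == nil {
		panic("failed to decode server-cert.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}

	// Verify the chain, analogous to: openssl verify -CAfile cacerts.pem server-cert.crt
	opts := x509.VerifyOptions{Roots: roots, DNSName: "registry.localhost"}
	if _, err := cert.Verify(opts); err != nil {
		panic(err)
	}
	fmt.Println("server certificate verifies against cacerts.pem")
}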
BIN testdata/haul.tar.zst vendored (Normal file → Executable file)
Binary file not shown.
99 testdata/hauler-manifest-pipeline.yaml vendored
@@ -1,99 +0,0 @@
# v1 manifests
apiVersion: content.hauler.cattle.io/v1
kind: Images
metadata:
  name: hauler-content-images-example
spec:
  images:
    - name: busybox
    - name: busybox:stable
      platform: linux/amd64
    - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
---
apiVersion: content.hauler.cattle.io/v1
kind: Charts
metadata:
  name: hauler-content-charts-example
spec:
  charts:
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.4
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.3
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.6
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.4
    - name: rancher-cluster-templates-0.5.2.tgz
      repoURL: .
---
apiVersion: content.hauler.cattle.io/v1
kind: Files
metadata:
  name: hauler-content-files-example
spec:
  files:
    - path: https://get.rke2.io/install.sh
    - path: https://get.rke2.io/install.sh
      name: rke2-install.sh
    - path: testdata/hauler-manifest.yaml
    - path: testdata/hauler-manifest.yaml
      name: hauler-manifest-local.yaml
---
# v1alpha1 manifests
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Images
metadata:
  name: hauler-content-images-example
spec:
  images:
    - name: busybox
    - name: busybox:stable
      platform: linux/amd64
    - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Charts
metadata:
  name: hauler-content-charts-example
spec:
  charts:
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.4
    - name: rancher
      repoURL: https://releases.rancher.com/server-charts/stable
      version: 2.8.3
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.6
    - name: hauler-helm
      repoURL: oci://ghcr.io/hauler-dev
      version: 1.0.4
    - name: rancher-cluster-templates-0.5.2.tgz
      repoURL: .
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Files
metadata:
  name: hauler-content-files-example
spec:
  files:
    - path: https://get.rke2.io/install.sh
    - path: https://get.rke2.io/install.sh
      name: rke2-install.sh
    - path: testdata/hauler-manifest.yaml
    - path: testdata/hauler-manifest.yaml
      name: hauler-manifest-local.yaml
Some files were not shown because too many files have changed in this diff.