Compare commits

..

19 Commits

Author SHA1 Message Date
Josh Wolf
933af22907 mvp rc prep (#65)
* add bootstrap script to k3s collection
* update ci to 1.17.x and temporarily skip tests (:
* remove helmtemplater
* update dependencies
* update releaser
* rename extremely poorly worded gitignore
2021-11-11 12:11:29 -07:00
Josh Wolf
99a9a1c54f Merge pull request #63 from rancherfederal/collections
support `collections` (sets of primitive `contents`)
2021-11-10 20:28:02 -07:00
Josh Wolf
8146a88a5d move cache logic to a store configurable option 2021-11-10 20:26:12 -07:00
Josh Wolf
4ee6129154 add thick chart builtin collection (chart with required images) 2021-11-10 20:11:15 -07:00
Josh Wolf
20cd37e173 add collections type (group of contents), and initial k3s builtin 2021-11-10 19:36:50 -07:00
Josh Wolf
8ab9fd6a38 represent all content as oci layouts (artifact.OCI interface), add blob caching and ephemeral stores (#59)
* represent all content as artifact.OCI interface and manipulate/add all content using oci layouts
* initial brew taps and macos universal binary
* change mediaType to string for better compatibility with other libraries
* ensure config is minimally viable for file/charts
* add transparent layer caching (filesystem) to artifact operations, clean up layer interface used by file/chart
* add store list and store copy commands

Signed-off-by: Josh Wolf <josh@joshwolf.dev>
2021-11-10 10:37:21 -07:00
Josh Wolf
8a46c20db6 Merge pull request #55 from rancherfederal/cli-ux
* cli ux and verbiage cleanup
* add `hauler store add` command
2021-11-01 14:36:24 -07:00
Josh Wolf
cde59cea74 add 'store add' set of commands for content adding 2021-11-01 15:29:08 -06:00
Josh Wolf
786e63f2ef allow config file to be passed to hauler store serve 2021-11-01 14:06:22 -06:00
Josh Wolf
880b296759 Merge pull request #56 from rancherfederal/content-tests
add _basic_ unit tests to each content type
2021-11-01 12:04:03 -07:00
Josh Wolf
4835699746 add _basic_ unit tests to each content type 2021-11-01 13:00:51 -06:00
Josh Wolf
e5384251f2 add cli aliases 2021-11-01 11:22:26 -06:00
Josh Wolf
ffa6943d6d cli ux and verbiage cleanup 2021-11-01 11:10:32 -06:00
Josh Wolf
372af894b3 refactor to baseline on pluggable oci collection/distribution (#41)
refactor to baseline on pluggable oci collection/distribution

Co-authored-by: Josh Wolf <josh@joshwolf.dev>
2021-10-29 15:55:20 -06:00
Josh Wolf
cea46d28fa Merge pull request #31 from rancherfederal/issue-30
bug: fix error when running a package with 0 bundles
2021-06-24 08:31:51 -06:00
Josh Wolf
1ea08063ac Merge pull request #32 from rancherfederal/wips
update readme with more obvious wip
2021-06-24 08:31:23 -06:00
Josh Wolf
2e5a8f897e update readme with more obvious wip 2021-06-24 08:30:50 -06:00
Josh Wolf
39e37cc04a clean up unused move fns 2021-06-24 07:39:20 -06:00
Josh Wolf
25d1c5eda0 bug: fix error when running a package with 0 bundles 2021-06-22 10:10:29 -06:00
108 changed files with 4674 additions and 6238 deletions

View File

@@ -6,62 +6,26 @@ on:
tags:
- '*'
jobs:
test:
strategy:
matrix:
go-version: [1.16.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: go test ./...
- name: Run lint/vet
run: |
go get -u golang.org/x/lint/golint
go mod tidy
golint ./...
go vet ./...
create-release:
needs: test
goreleaser:
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Download release notes utility
env:
GH_REL_URL: https://github.com/buchanae/github-release-notes/releases/download/0.2.0/github-release-notes-linux-amd64-0.2.0.tar.gz
run: cd /tmp && curl -sSL ${GH_REL_URL} | tar xz && sudo mv github-release-notes /usr/local/bin/
- name: Generate release notes
run: |
echo 'CHANGELOG' > /tmp/release.txt
#github-release-notes -org rancherfederal -repo hauler -since-latest-release -include-author >> /tmp/release.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Build and run Hauler package build
run: |
mkdir bin
go build -o bin ./cmd/...
./bin/hauler package build
- name: Run GoReleaser
id: goreleaser
uses: goreleaser/goreleaser-action@v1
with:
version: latest
args: release --release-notes=/tmp/release.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17.x
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
distribution: goreleaser
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

3
.gitignore vendored
View File

@@ -1,3 +1,4 @@
.DS_Store
# Vagrant
.vagrant
@@ -10,6 +11,7 @@
*.njsproj
*.sln
*.sw?
*.dir-locals.el
# old, ad-hoc ignores
artifacts
@@ -25,3 +27,4 @@ dist/
tmp/
bin/
pkg.yaml
haul/

View File

@@ -1,16 +1,32 @@
project_name: hauler
before:
hooks:
- go mod tidy
- go mod download
builds:
- main: cmd/hauler/main.go
goos:
- linux
- darwin
- windows
goarch:
- amd64
- arm64
env:
- CGO_ENABLED=0
flags:
- -tags=containers_image_openpgp containers_image_ostree
release:
extra_files:
- glob: ./pkg.tar.zst
universal_binaries:
- replace: true
changelog:
skip: false
use: git
brews:
- name: hauler
tap:
owner: rancherfederal
name: homebrew-tap
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
folder: Formula
description: "Hauler CLI"

View File

@@ -12,6 +12,9 @@ all: fmt vet install test
build:
mkdir bin;\
$(GO_BUILD_ENV) go build -o bin ./cmd/...;\
build-all: fmt vet
goreleaser build --rm-dist --snapshot
install:
$(GO_BUILD_ENV) go install

View File

@@ -1,70 +1,11 @@
# Hauler - Kubernetes Air Gap Migration
# Hauler: Airgap Assistant
## WARNING - Work In Progress
__⚠️ WARNING: This is an experimental, work in progress project. _Everything_ is subject to change, and it is actively in development, so let us know what you think!__
Hauler is a tool designed to ease the burden of working with containers and kubernetes in an airgap. Several components of hauler are used in unison to provide airgap utilities.
`hauler` is a command line tool that aims to simplify the pain points that exist around airgapped Kubernetes deployments.
It remains as unopinionated as possible, and does _not_ attempt to enforce a specific cluster type or application deployment model.
Instead, it focuses solely on simplifying the primary airgap pain points:
* artifact collection
* artifact distribution
Hauler's utility is split into a few commands intended to solve increasingly complex airgapped use cases.
__Portable self contained clusters__:
Within the `hauler package` subset of commands, `Packages` (name to be finalized) can be created, updated, and ran.
A `Package` is a hauler specific, configurable, self-contained, compressed archive (`*.tar.zst`) that contains all dependencies needed to 1) create a kubernetes cluster, 2) deploy resources into the cluster.
```bash
# Build a minimal portable k8s cluster
hauler package build
# Build a package that deploys resources when deployed
hauler package build -p path/to/chart -p path/to/manifests -i extra/image:latest -i busybox:musl
# Build a package that deploys a cluster, oci registry, and sample app on boot
# Note the aliases introduced
hauler pkg b -p testdata/docker-registry -p testdata/rawmanifests
```
Hauler packages at their core stand on the shoulders of other technologies (`k3s`, `rke2`, and `fleet`), and as such, are designed to be extremely flexible.
Common use cases are to build turn key, appliance like clusters designed to boot on disconnected or low powered devices. Or portable "utility" clusters that can act as a stepping stone for further downstream deployable infrastructure. Since every `Package` is built as an entirely self contained archive, disconnected environments are _always_ a first class citizen.
__Image Relocation__:
For disconnected workloads that don't require a cluster to be created first, images can be efficiently packaged and relocated with `hauler relocate`.
Images are stored as a compressed archive of an `oci` layout, ensuring only the required de-duplicated image layers are packaged and transferred.
## Installation
Hauler is and will always be a statically compiled binary; we strongly believe a zero dependency tool is key to reducing operational complexity in airgap environments.
Before GA, hauler can be downloaded from the releases page for every tagged release
## Dev
A `Vagrant` file is provided as a testing ground. The boot scripts at `vagrant-scripts/*.sh` will be run on boot to ensure the dev environment is airgapped.
```bash
vagrant up
vagrant ssh
```
More info can be found in the [vagrant docs](VAGRANT.md).
## WIP Warnings
API stability (including as a code library and as a network endpoint) is NOT guaranteed before `v1` API definitions and a 1.0 release. The following recommendations are made regarding usage patterns of hauler:
- `alpha` (`v1alpha1`, `v1alpha2`, ...) API versions: use **_only_** through `haulerctl`
- `beta` (`v1beta1`, `v1beta2`, ...) API versions: use as an **_experimental_** library and/or API endpoint
- `stable` (`v1`, `v2`, ...) API versions: use as stable CLI tool, library, and/or API endpoint
### Build
```bash
# Current arch build
make build
# Multiarch dev build
goreleaser build --rm-dist --snapshot
```
`hauler` achieves this by leaning heavily on the [oci spec](https://github.com/opencontainers), and the vast ecosystem of tooling available for fetching and distributing oci content.

View File

@@ -1,61 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/spf13/cobra"
)
var (
copyLong = `hauler copies artifacts stored on a registry to local disk`
copyExample = `
# Run Hauler
hauler copy locahost:5000/artifacts:latest
`
)
type copyOpts struct {
*rootOpts
dir string
sourceRef string
}
// NewCopyCommand creates a new sub command under
// hauler for copying artifacts from an OCI registry to local disk.
func NewCopyCommand() *cobra.Command {
	opts := &copyOpts{
		rootOpts: &ro,
	}
	cmd := &cobra.Command{
		Use:     "copy",
		Short:   "Download artifacts from OCI registry to local disk",
		Long:    copyLong,
		Example: copyExample,
		Aliases: []string{"c", "cp"},
		Args:    cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// First positional argument is the source registry reference.
			opts.sourceRef = args[0]
			return opts.Run(opts.sourceRef)
		},
	}
	f := cmd.Flags()
	f.StringVarP(&opts.dir, "dir", "d", ".", "Target directory for file copy")
	return cmd
}
// Run downloads the artifact at src into the configured local directory,
// bounded by the global --timeout flag.
//
// Bug fix: a failure from oci.Get was previously logged and then swallowed,
// so the command exited 0 even when the copy failed. The error is now
// returned so cobra reports a non-zero exit.
func (o *copyOpts) Run(src string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	if err := oci.Get(ctx, src, o.dir); err != nil {
		o.logger.Errorf("error copy artifact %s to local directory %s: %v", src, o.dir, err)
		return err
	}
	return nil
}

View File

@@ -1,42 +0,0 @@
package app
import (
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/spf13/cobra"
)
type ociOpts struct {
insecure bool
plainHTTP bool
}
const (
haulerMediaType = "application/vnd.oci.image"
)
// NewOCICommand returns the `oci` parent command grouping the push and pull
// subcommands.
func NewOCICommand() *cobra.Command {
	opts := ociOpts{}
	cmd := &cobra.Command{
		Use:   "oci",
		Short: "oci stuff",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Parent command is a no-op; show help.
			return cmd.Help()
		},
	}
	cmd.AddCommand(NewOCIPushCommand())
	cmd.AddCommand(NewOCIPullCommand())
	f := cmd.Flags()
	// NOTE(review): these flags bind to a local ociOpts value that the
	// push/pull subcommands never receive, and resolver() ignores both
	// fields — the flags currently appear to have no effect. Confirm.
	f.BoolVarP(&opts.insecure, "insecure", "", false, "Connect to registry without certs")
	f.BoolVarP(&opts.plainHTTP, "plain-http", "", false, "Connect to registry over plain http")
	return cmd
}
// resolver returns a containerd docker-registry resolver.
// NOTE(review): PlainHTTP is hard-coded to true, so the --insecure and
// --plain-http flags on ociOpts are ignored — presumably a dev-time
// shortcut; confirm before relying on TLS here.
func (o *ociOpts) resolver() (remotes.Resolver, error) {
	resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
	return resolver, nil
}

View File

@@ -1,67 +0,0 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type ociPullOpts struct {
ociOpts
sourceRef string
outDir string
}
// NewOCIPullCommand returns the `oci pull` subcommand.
// Usage: hauler oci pull <source-ref>
func NewOCIPullCommand() *cobra.Command {
	opts := ociPullOpts{}
	cmd := &cobra.Command{
		Use:     "pull",
		Short:   "oci pull",
		Aliases: []string{"p"},
		Args:    cobra.MinimumNArgs(1),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return opts.PreRun()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// First positional argument is the source reference to pull.
			opts.sourceRef = args[0]
			return opts.Run()
		},
	}
	f := cmd.Flags()
	f.StringVarP(&opts.outDir, "out-dir", "o", ".", "output directory")
	return cmd
}
// PreRun validates options before Run; currently a no-op placeholder.
func (o *ociPullOpts) PreRun() error {
	return nil
}
// Run pulls o.sourceRef from the remote registry into a file store rooted at
// o.outDir, restricted to hauler's media type.
//
// Bug fix: the error returned by oras.Pull was previously discarded, so a
// failed pull logged a zero digest and reported success. The error is now
// checked before desc is used.
func (o *ociPullOpts) Run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := content.NewFileStore(o.outDir)
	defer store.Close()

	// Only accept content pushed with hauler's media type.
	allowedMediaTypes := []string{
		haulerMediaType,
	}

	resolver, err := o.resolver()
	if err != nil {
		return err
	}

	desc, _, err := oras.Pull(ctx, resolver, o.sourceRef, store, oras.WithAllowedMediaTypes(allowedMediaTypes))
	if err != nil {
		return err
	}

	logrus.Infof("pulled %s with digest: %s", o.sourceRef, desc.Digest)
	return nil
}

View File

@@ -1,74 +0,0 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
)
type ociPushOpts struct {
ociOpts
targetRef string
pathRef string
}
// NewOCIPushCommand returns the `oci push` subcommand.
// Usage: hauler oci push <path> <target-ref>
func NewOCIPushCommand() *cobra.Command {
	opts := ociPushOpts{}
	cmd := &cobra.Command{
		Use:   "push",
		Short: "oci push",
		// NOTE(review): alias "p" collides with `oci pull`'s alias under the
		// same parent command — only one of them will resolve; confirm.
		Aliases: []string{"p"},
		Args:    cobra.MinimumNArgs(2),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return opts.PreRun()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// args: <local path> <target registry reference>
			opts.pathRef = args[0]
			opts.targetRef = args[1]
			return opts.Run()
		},
	}
	return cmd
}
// PreRun validates options before Run; currently a no-op placeholder.
func (o *ociPushOpts) PreRun() error {
	return nil
}
// Run reads the file at o.pathRef fully into memory and pushes it to
// o.targetRef as a single-descriptor ORAS artifact using hauler's media type.
func (o *ociPushOpts) Run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	data, err := os.ReadFile(o.pathRef)
	if err != nil {
		return err
	}
	resolver, err := o.resolver()
	if err != nil {
		return err
	}
	// A memory store suffices: the whole payload is already in memory.
	store := content.NewMemoryStore()
	contents := []ocispec.Descriptor{
		store.Add(o.pathRef, haulerMediaType, data),
	}
	desc, err := oras.Push(ctx, resolver, o.targetRef, store, contents)
	if err != nil {
		return err
	}
	logrus.Infof("pushed %s to %s with digest: %s", o.pathRef, o.targetRef, desc.Digest)
	return nil
}

View File

@@ -1,25 +0,0 @@
package app
import "github.com/spf13/cobra"
type pkgOpts struct{}
// NewPkgCommand returns the `pkg` parent command, grouping the package
// build and run subcommands.
func NewPkgCommand() *cobra.Command {
	opts := &pkgOpts{}
	//TODO
	_ = opts
	cmd := &cobra.Command{
		Use:     "pkg",
		Short:   "Interact with packages",
		Aliases: []string{"p", "package"},
		RunE: func(cmd *cobra.Command, args []string) error {
			// Parent command is a no-op; show help.
			return cmd.Help()
		},
	}
	cmd.AddCommand(NewPkgBuildCommand())
	cmd.AddCommand(NewPkgRunCommand())
	return cmd
}

View File

@@ -1,202 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
"sigs.k8s.io/yaml"
)
type pkgBuildOpts struct {
*rootOpts
cfgFile string
name string
dir string
driver string
driverVersion string
fleetVersion string
images []string
paths []string
}
// NewPkgBuildCommand returns the `pkg build` subcommand, which collects
// manifests, images, driver, and fleet assets into a self contained
// compressed archive.
//
// Bug fix: the --name flag's help text read "will dicate file name" —
// corrected to "dictate".
func NewPkgBuildCommand() *cobra.Command {
	opts := pkgBuildOpts{
		rootOpts: &ro,
	}
	cmd := &cobra.Command{
		Use:   "build",
		Short: "Build a self contained compressed archive of manifests and images",
		Long: `
Compressed archives created with this command can be extracted and run anywhere the underlying 'driver' can be run.
Archives are built by collecting all the dependencies (images and manifests) required.
Examples:
# Build a package containing a helm chart with images autodetected from the generated helm chart
hauler package build -p path/to/helm/chart
# Build a package, sourcing from multiple manifest sources and additional images not autodetected
hauler pkg build -p path/to/raw/manifests -p path/to/kustomize -i busybox:latest -i busybox:musl
# Build a package using a different version of k3s
hauler p build -p path/to/chart --driver-version "v1.20.6+k3s1"
# Build a package from a config file (if ./pkg.yaml does not exist, one will be created)
hauler package build -c ./pkg.yaml
`,
		Aliases: []string{"b"},
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return opts.PreRun()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return opts.Run()
		},
	}
	f := cmd.PersistentFlags()
	f.StringVarP(&opts.name, "name", "n", "pkg",
		"name of the pkg to create, will dictate file name")
	f.StringVarP(&opts.cfgFile, "config", "c", "",
		"path to config file")
	f.StringVar(&opts.dir, "directory", "",
		"Working directory for building package, if empty, an ephemeral temporary directory will be used. Set this to persist package artifacts between builds.")
	f.StringVarP(&opts.driver, "driver", "d", "k3s",
		"")
	f.StringVar(&opts.driverVersion, "driver-version", "v1.21.1+k3s1",
		"")
	f.StringVar(&opts.fleetVersion, "fleet-version", "v0.3.5",
		"")
	f.StringSliceVarP(&opts.paths, "path", "p", []string{},
		"")
	f.StringSliceVarP(&opts.images, "image", "i", []string{},
		"")
	return cmd
}
// PreRun creates a default package config file (serialized from the cli
// flags) when a config path was given but the file does not yet exist.
func (o *pkgBuildOpts) PreRun() error {
	_, err := os.Stat(o.cfgFile)
	if os.IsNotExist(err) {
		// os.Stat("") also reports not-exist, so an unset --config lands
		// here; in that case there is nothing to create.
		if o.cfgFile == "" {
			return nil
		}
		o.logger.Warnf("Did not find an existing %s, creating one", o.cfgFile)
		p := o.toPackage()
		data, err := yaml.Marshal(p)
		if err != nil {
			return err
		}
		if err := os.WriteFile(o.cfgFile, data, 0644); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	return nil
}
// Run builds the package: loads (or synthesizes from flags) the package
// config, prepares a working directory, packages bundles/images/driver/fleet,
// and finally archives everything under o.name.
//
// Bug fix: the working-directory error log used two format verbs
// ("%s\n%v") with only one argument, producing a mangled message; it now
// logs the directory and the error.
func (o *pkgBuildOpts) Run() error {
	o.logger.Infof("Building package")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var p v1alpha1.Package
	if o.cfgFile != "" {
		o.logger.Infof("Config file '%s' specified, attempting to load existing package config", o.cfgFile)
		cfgData, err := os.ReadFile(o.cfgFile)
		if err != nil {
			return err
		}
		if err := yaml.Unmarshal(cfgData, &p); err != nil {
			return err
		}
	} else {
		o.logger.Infof("No config file specified, strictly using cli arguments")
		p = o.toPackage()
	}

	var wdir string
	if o.dir != "" {
		if _, err := os.Stat(o.dir); err != nil {
			o.logger.Errorf("Failed to use specified working directory '%s': %v", o.dir, err)
			return err
		}
		wdir = o.dir
	} else {
		tmpdir, err := os.MkdirTemp("", "hauler")
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpdir)
		wdir = tmpdir
	}

	pkgr := packager.NewPackager(wdir, o.logger)
	d := driver.NewDriver(p.Spec.Driver)

	if _, bErr := pkgr.PackageBundles(ctx, p.Spec.Paths...); bErr != nil {
		return bErr
	}
	// NOTE(review): images come from the cli flags (o.images) rather than
	// p.Spec.Images, so images listed in a config file are ignored here —
	// confirm whether that is intended.
	if iErr := pkgr.PackageImages(ctx, o.images...); iErr != nil {
		return iErr
	}
	if dErr := pkgr.PackageDriver(ctx, d); dErr != nil {
		return dErr
	}
	if fErr := pkgr.PackageFleet(ctx, p.Spec.Fleet); fErr != nil {
		return fErr
	}

	a := packager.NewArchiver()
	if aErr := pkgr.Archive(a, p, o.name); aErr != nil {
		return aErr
	}

	o.logger.Successf("Finished building package")
	return nil
}
// toPackage assembles a v1alpha1.Package purely from the command line flags,
// naming it after the --name flag.
func (o *pkgBuildOpts) toPackage() v1alpha1.Package {
	spec := v1alpha1.PackageSpec{
		Fleet: v1alpha1.Fleet{
			Version: o.fleetVersion,
		},
		Driver: v1alpha1.Driver{
			Type:    o.driver,
			Version: o.driverVersion,
		},
		Paths:  o.paths,
		Images: o.images,
	}
	// TypeMeta is left at its zero value (Kind/APIVersion empty), matching
	// the prior behavior.
	return v1alpha1.Package{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name: o.name,
		},
		Spec: spec,
	}
}

View File

@@ -1,84 +0,0 @@
package app
import (
"os"
"testing"
)
// Test_pkgBuildOpts_Run drives the full package build against the repo's
// testdata directories. It exercises PreRun (config creation) followed by
// Run, so it behaves more like an integration test than a unit test.
func Test_pkgBuildOpts_Run(t *testing.T) {
	l, _ := setupCliLogger(os.Stdout, "debug")
	tro := rootOpts{l}
	type fields struct {
		rootOpts      *rootOpts
		cfgFile       string
		name          string
		driver        string
		driverVersion string
		fleetVersion  string
		images        []string
		paths         []string
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		{
			name: "should package all types of local manifests",
			fields: fields{
				rootOpts:      &tro,
				cfgFile:       "pkg.yaml",
				name:          "k3s",
				driver:        "k3s",
				driverVersion: "v1.21.1+k3s1",
				fleetVersion:  "v0.3.5",
				images:        nil,
				paths: []string{
					"../../../testdata/docker-registry",
					"../../../testdata/rawmanifests",
				},
			},
			wantErr: false,
		},
		{
			name: "should package using fleet.yaml",
			fields: fields{
				rootOpts:      &tro,
				cfgFile:       "pkg.yaml",
				name:          "k3s",
				driver:        "k3s",
				driverVersion: "v1.21.1+k3s1",
				fleetVersion:  "v0.3.5",
				images:        nil,
				paths: []string{
					"../../../testdata/custom",
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			o := &pkgBuildOpts{
				rootOpts:      tt.fields.rootOpts,
				cfgFile:       tt.fields.cfgFile,
				name:          tt.fields.name,
				driver:        tt.fields.driver,
				driverVersion: tt.fields.driverVersion,
				fleetVersion:  tt.fields.fleetVersion,
				images:        tt.fields.images,
				paths:         tt.fields.paths,
			}
			// PreRun creates pkg.yaml when missing; clean it up afterwards.
			if err := o.PreRun(); err != nil {
				t.Errorf("PreRun() error = %v", err)
			}
			defer os.Remove(o.cfgFile)
			if err := o.Run(); (err != nil) != tt.wantErr {
				t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@@ -1,91 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/bootstrap"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
"os"
)
type pkgRunOpts struct {
*rootOpts
cfgFile string
}
// NewPkgRunCommand returns the `pkg run` subcommand, which boots a package
// archive produced by `hauler package build`.
func NewPkgRunCommand() *cobra.Command {
	opts := pkgRunOpts{
		rootOpts: &ro,
	}
	cmd := &cobra.Command{
		Use:   "run",
		Short: "Run a compressed archive",
		Long: `
Run a compressed archive created from a 'hauler package build'.
Examples:
# Run a package
hauler package run pkg.tar.zst
`,
		Aliases: []string{"r"},
		Args:    cobra.MinimumNArgs(1),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return opts.PreRun()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// First positional argument is the package archive path.
			return opts.Run(args[0])
		},
	}
	return cmd
}
// PreRun validates options before Run; currently a no-op placeholder.
func (o *pkgRunOpts) PreRun() error {
	return nil
}
// Run unpacks the package archive at pkgPath into a temporary directory and
// boots the embedded cluster through the package's driver, in three phases:
// PreBoot, Boot, PostBoot.
func (o *pkgRunOpts) Run(pkgPath string) error {
	o.logger.Infof("Running from '%s'", pkgPath)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return err
	}
	// NOTE(review): tmpdir is never removed. Possibly deliberate — the booted
	// cluster may keep referencing the unpacked files — but confirm; as
	// written every run leaks a temp directory.
	o.logger.Debugf("Using temporary working directory: %s", tmpdir)
	a := packager.NewArchiver()
	if err := packager.Unpackage(a, pkgPath, tmpdir); err != nil {
		return err
	}
	o.logger.Debugf("Unpackaged %s", pkgPath)
	b, err := bootstrap.NewBooter(tmpdir, o.logger)
	if err != nil {
		return err
	}
	d := driver.NewDriver(b.Package.Spec.Driver)
	if preErr := b.PreBoot(ctx, d); preErr != nil {
		return preErr
	}
	if bErr := b.Boot(ctx, d); bErr != nil {
		return bErr
	}
	if postErr := b.PostBoot(ctx, d); postErr != nil {
		return postErr
	}
	o.logger.Successf("Access the cluster with '/opt/hauler/bin/kubectl'")
	return nil
}

View File

@@ -1,33 +0,0 @@
package app
import (
"github.com/spf13/cobra"
)
type relocateOpts struct {
inputFile string
*rootOpts
}
// NewRelocateCommand creates a new sub command under
// haulerctl for relocating images and artifacts to a registry.
func NewRelocateCommand() *cobra.Command {
	opts := &relocateOpts{
		rootOpts: &ro,
	}
	cmd := &cobra.Command{
		Use:     "relocate",
		Short:   "relocate images or artifacts to a registry",
		Long:    "",
		Aliases: []string{"r"},
		RunE: func(cmd *cobra.Command, args []string) error {
			// Parent command is a no-op; show help.
			return cmd.Help()
		},
	}
	// The shared relocateOpts is handed down so both subcommands populate
	// the same inputFile/logger state.
	cmd.AddCommand(NewRelocateArtifactsCommand(opts))
	cmd.AddCommand(NewRelocateImagesCommand(opts))
	return cmd
}

View File

@@ -1,56 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/spf13/cobra"
)
type relocateArtifactsOpts struct {
*relocateOpts
destRef string
}
var (
relocateArtifactsLong = `hauler relocate artifacts process an archive with files to be pushed to a registry`
relocateArtifactsExample = `
# Run Hauler
hauler relocate artifacts artifacts.tar.zst locahost:5000/artifacts:latest
`
)
// NewRelocateArtifactsCommand creates a new sub command of relocate for artifacts.
// Usage: hauler relocate artifacts <input archive> <destination ref>
func NewRelocateArtifactsCommand(relocate *relocateOpts) *cobra.Command {
	opts := &relocateArtifactsOpts{
		relocateOpts: relocate,
	}
	cmd := &cobra.Command{
		Use:     "artifacts",
		Short:   "Use artifact from bundle artifacts to populate a target file server with the artifact's contents",
		Long:    relocateArtifactsLong,
		Example: relocateArtifactsExample,
		Args:    cobra.MinimumNArgs(2),
		Aliases: []string{"a", "art", "af"},
		RunE: func(cmd *cobra.Command, args []string) error {
			// args: <input archive> <destination registry reference>
			opts.inputFile = args[0]
			opts.destRef = args[1]
			return opts.Run(opts.destRef, opts.inputFile)
		},
	}
	return cmd
}
// Run pushes the contents of the input archive to the destination registry
// reference, bounded by the global --timeout flag.
//
// Bug fix: a failure from oci.Put was previously logged and swallowed, so
// the command exited 0 on error. It is now returned to the caller.
func (o *relocateArtifactsOpts) Run(dst string, input string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	if err := oci.Put(ctx, input, dst); err != nil {
		o.logger.Errorf("error pushing artifact to registry %s: %v", dst, err)
		return err
	}
	return nil
}

View File

@@ -1,103 +0,0 @@
package app
import (
"os"
"path/filepath"
"strings"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
)
var (
relocateImagesLong = `hauler relocate images processes a bundle provides by hauler package build and copies all of
the collected images to a registry`
relocateImagesExample = `
# Run Hauler
hauler relocate images pkg.tar.zst locahost:5000
`
)
type relocateImagesOpts struct {
*relocateOpts
destRef string
}
// NewRelocateImagesCommand creates a new sub command of relocate for images.
// Usage: hauler relocate images <input archive> <destination registry>
func NewRelocateImagesCommand(relocate *relocateOpts) *cobra.Command {
	opts := &relocateImagesOpts{
		relocateOpts: relocate,
	}
	cmd := &cobra.Command{
		Use:     "images",
		Short:   "Use artifact from bundle images to populate a target registry with the artifact's images",
		Long:    relocateImagesLong,
		Example: relocateImagesExample,
		Args:    cobra.MinimumNArgs(2),
		Aliases: []string{"i", "img", "imgs"},
		RunE: func(cmd *cobra.Command, args []string) error {
			// args: <input archive> <destination registry>
			opts.inputFile = args[0]
			opts.destRef = args[1]
			return opts.Run(opts.destRef, opts.inputFile)
		},
	}
	return cmd
}
// Run unpacks the package archive, opens the embedded OCI layout, and copies
// every image it lists to the destination registry dst.
//
// Bug fixes relative to the original:
//   - every error was logged and then execution continued with invalid
//     values (a zero layout, nil image, or nil reference), which would
//     panic or silently no-op; errors are now returned,
//   - the temporary working directory was never removed; it is now cleaned
//     up on return (it is only used within this function).
func (o *relocateImagesOpts) Run(dst string, input string) error {
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)
	o.logger.Debugf("Using temporary working directory: %s", tmpdir)

	a := packager.NewArchiver()
	if err := packager.Unpackage(a, input, tmpdir); err != nil {
		o.logger.Errorf("error unpackaging input %s: %v", input, err)
		return err
	}
	o.logger.Debugf("Unpackaged %s", input)

	path := filepath.Join(tmpdir, "layout")
	ly, err := layout.FromPath(path)
	if err != nil {
		o.logger.Errorf("error creating OCI layout: %v", err)
		return err
	}

	for nm, hash := range oci.ListImages(ly) {
		// NOTE(review): assumes every image name contains a registry prefix
		// before the first "/" — n[1] panics otherwise; confirm upstream
		// guarantees this shape.
		n := strings.SplitN(nm, "/", 2)
		img, err := ly.Image(hash)
		if err != nil {
			o.logger.Errorf("error creating image from layout: %v", err)
			return err
		}
		o.logger.Infof("Copy %s to %s", n[1], dst)
		dstimg := dst + "/" + n[1]
		tag, err := name.ParseReference(dstimg)
		if err != nil {
			o.logger.Errorf("err parsing destination image %s: %v", dstimg, err)
			return err
		}
		if err := remote.Write(tag, img); err != nil {
			o.logger.Errorf("error writing image to destination registry %s: %v", dst, err)
			return err
		}
	}
	return nil
}

View File

@@ -1,81 +0,0 @@
package app
import (
"io"
"os"
"time"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/spf13/cobra"
)
var (
loglevel string
timeout time.Duration
getLong = `hauler provides CLI-based air-gap migration assistance using k3s.
Choose your functionality and new a package when internet access is available,
then deploy the package into your air-gapped environment.
`
getExample = `
hauler pkg build
hauler pkg run pkg.tar.zst
hauler relocate artifacts artifacts.tar.zst
hauler relocate images pkg.tar.zst locahost:5000
hauler copy localhost:5000/artifacts:latest
`
)
type rootOpts struct {
logger log.Logger
}
var ro rootOpts
// NewRootCommand defines the root hauler command, wiring global flags
// (loglevel, timeout) and the top-level subcommands.
func NewRootCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:          "hauler",
		Short:        "hauler provides CLI-based air-gap migration assistance",
		Long:         getLong,
		Example:      getExample,
		SilenceUsage: true,
		// Build the shared logger once, before any subcommand runs.
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			l, err := setupCliLogger(os.Stdout, loglevel)
			if err != nil {
				return err
			}
			ro.logger = l
			return nil
		},
		RunE: func(cmd *cobra.Command, _ []string) error {
			return cmd.Help()
		},
	}
	cobra.OnInitialize()
	cmd.AddCommand(NewRelocateCommand())
	cmd.AddCommand(NewCopyCommand())
	cmd.AddCommand(NewPkgCommand())
	f := cmd.PersistentFlags()
	f.StringVarP(&loglevel, "loglevel", "l", "debug",
		"Log level (debug, info, warn, error, fatal, panic)")
	f.DurationVar(&timeout, "timeout", 1*time.Minute,
		"TODO: timeout for operations")
	return cmd
}
// setupCliLogger constructs the CLI logger writing to out.
// NOTE(review): the level argument is currently unused here — presumably the
// level is applied elsewhere (or not at all); confirm before relying on it.
func setupCliLogger(out io.Writer, level string) (log.Logger, error) {
	l := log.NewLogger(out)
	return l, nil
}

110
cmd/hauler/cli/cli.go Normal file
View File

@@ -0,0 +1,110 @@
package cli
import (
"context"
"errors"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/cache"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
type rootOpts struct {
logLevel string
cacheDir string
storeDir string
}
const defaultStoreLocation = "haul"
var ro = &rootOpts{}
// New builds the root `hauler` command with its persistent flags
// (--log-level, --cache, --store) and registers the subcommands.
func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "hauler",
		Short: "",
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// Apply the requested log level to the context logger before any
			// subcommand runs.
			log.FromContext(cmd.Context()).SetLevel(ro.logLevel)
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}
	pf := cmd.PersistentFlags()
	pf.StringVarP(&ro.logLevel, "log-level", "l", "info", "")
	pf.StringVar(&ro.cacheDir, "cache", "", "Location of where to store cache data (defaults to $XDG_CACHE_DIR/hauler)")
	pf.StringVarP(&ro.storeDir, "store", "s", "", "Location to create store at (defaults to $PWD/store)")
	// Add subcommands
	addDownload(cmd)
	addStore(cmd)
	return cmd
}
// getStore resolves the on-disk store directory (creating it on first use)
// and returns a store wired to the configured blob cache. When --store is
// unset it defaults to $PWD/<defaultStoreLocation>.
func (o *rootOpts) getStore(ctx context.Context) (*store.Store, error) {
	lgr := log.FromContext(ctx)
	dir := o.storeDir
	if dir == "" {
		// NOTE(review): message says "$PWD/store" but defaultStoreLocation
		// is "haul" — the log text looks stale.
		lgr.Debugf("no store path specified, defaulting to $PWD/store")
		pwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		dir = filepath.Join(pwd, defaultStoreLocation)
	}
	abs, err := filepath.Abs(dir)
	if err != nil {
		return nil, err
	}
	lgr.Debugf("using store at %s", abs)
	// Create the store directory if it does not exist yet.
	if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
		err := os.Mkdir(abs, os.ModePerm)
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}
	// TODO: Do we want this to be configurable?
	c, err := o.getCache(ctx)
	if err != nil {
		return nil, err
	}
	s := store.NewStore(ctx, abs, store.WithCache(c))
	return s, nil
}
// getCache returns a filesystem-backed cache rooted at --cache, defaulting
// to the platform user cache directory plus "hauler" (created if missing).
func (o *rootOpts) getCache(ctx context.Context) (cache.Cache, error) {
	dir := o.cacheDir
	if dir == "" {
		// Default to $XDG_CACHE_DIR
		cachedir, err := os.UserCacheDir()
		if err != nil {
			return nil, err
		}
		abs, _ := filepath.Abs(filepath.Join(cachedir, "hauler"))
		if err := os.MkdirAll(abs, os.ModePerm); err != nil {
			return nil, err
		}
		dir = abs
	}
	c := cache.NewFilesystem(dir)
	return c, nil
}

View File

@@ -0,0 +1,42 @@
package cli
import (
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/cmd/hauler/cli/download"
)
// addDownload registers the `download` (alias `dl`) subcommand on parent.
func addDownload(parent *cobra.Command) {
	o := &download.Opts{}
	cmd := &cobra.Command{
		Use:   "download",
		Short: "Download OCI content from a registry and populate it on disk",
		Long: `Locate OCI content based on it's reference in a compatible registry and download the contents to disk.
Note that the content type determines it's format on disk. Hauler's built in content types act as follows:
- File: as a file named after the pushed contents source name (ex: my-file.yaml:latest --> my-file.yaml)
- Image: as a .tar named after the image (ex: alpine:latest --> alpine:latest.tar)
- Chart: as a .tar.gz named after the chart (ex: loki:2.0.2 --> loki-2.0.2.tar.gz)`,
		Example: `
# Download a file
hauler dl my-file.yaml:latest
# Download an image
hauler dl rancher/k3s:v1.22.2-k3s2
# Download a chart
hauler dl longhorn:1.2.0`,
		Aliases: []string{"dl"},
		Args:    cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, arg []string) error {
			// Single positional argument: the content reference to download.
			ctx := cmd.Context()
			return download.Cmd(ctx, o, arg[0])
		},
	}
	o.AddArgs(cmd)
	parent.AddCommand(cmd)
}

View File

@@ -0,0 +1,129 @@
package download
import (
"context"
"encoding/json"
"fmt"
"path"
"github.com/containerd/containerd/remotes/docker"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/oras"
"github.com/rancherfederal/hauler/pkg/artifact/types"
"github.com/rancherfederal/hauler/pkg/log"
)
type Opts struct {
DestinationDir string
OutputFile string
}
func (o *Opts) AddArgs(cmd *cobra.Command) {
f := cmd.Flags()
f.StringVar(&o.DestinationDir, "dir", "", "Directory to save contents to (defaults to current directory)")
f.StringVarP(&o.OutputFile, "output", "o", "", "(Optional) Override name of file to save.")
}
// Cmd locates OCI content by reference, inspects the manifest's config media
// type to decide which hauler content type it is, and materializes it on disk
// in the DestinationDir.
//
// Fixes relative to the original:
//   - the file store `cs` was created and deferred-closed but never used,
//     while each case leaked a second, never-closed store; the single shared
//     store is now used by the file and chart cases
//   - the file case logged the entire descriptor struct instead of its digest
//   - the chart case logged the placeholder name "donkey"
func Cmd(ctx context.Context, o *Opts, reference string) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running command `hauler download`")

	// Shared on-disk store for the oras-based (file/chart) cases below.
	cs := content.NewFileStore(o.DestinationDir)
	defer cs.Close()

	ref, err := name.ParseReference(reference)
	if err != nil {
		return err
	}

	desc, err := remote.Get(ref)
	if err != nil {
		return err
	}

	manifestData, err := desc.RawManifest()
	if err != nil {
		return err
	}

	var manifest ocispec.Manifest
	if err := json.Unmarshal(manifestData, &manifest); err != nil {
		return err
	}

	// TODO: These need to be factored out into each of the contents own logic
	switch manifest.Config.MediaType {
	case types.DockerConfigJSON, types.OCIManifestSchema1:
		lgr.Infof("identified [image] (%s) content", manifest.Config.MediaType)

		img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		if err != nil {
			return err
		}

		outputFile := o.OutputFile
		if outputFile == "" {
			outputFile = fmt.Sprintf("%s:%s.tar", path.Base(ref.Context().RepositoryStr()), ref.Identifier())
		}

		if err := tarball.WriteToFile(outputFile, ref, img); err != nil {
			return err
		}
		lgr.Infof("downloaded [%s] to [%s]", ref.Name(), outputFile)

	case types.FileMediaType:
		lgr.Infof("identified [file] (%s) content", manifest.Config.MediaType)

		resolver := docker.NewResolver(docker.ResolverOptions{})
		mdesc, descs, err := oras.Pull(ctx, resolver, ref.Name(), cs)
		if err != nil {
			return err
		}
		// Log the manifest digest rather than the whole descriptor struct.
		lgr.Infof("downloaded [%d] files with digest [%s]", len(descs), mdesc.Digest.String())

	case types.ChartLayerMediaType, types.ChartConfigMediaType:
		lgr.Infof("identified [chart] (%s) content", manifest.Config.MediaType)

		resolver := docker.NewResolver(docker.ResolverOptions{})
		mdesc, _, err := oras.Pull(ctx, resolver, ref.Name(), cs)
		if err != nil {
			return err
		}
		// Log the actual chart name (was a hard-coded "donkey" placeholder).
		lgr.Infof("downloaded chart [%s] with digest [%s]", path.Base(ref.Context().RepositoryStr()), mdesc.Digest.String())

	default:
		return fmt.Errorf("unrecognized content type: %s", manifest.Config.MediaType)
	}

	return nil
}
// getManifest parses a reference string and fetches its descriptor from the
// remote registry, honoring ctx for cancellation.
func getManifest(ctx context.Context, ref string) (*remote.Descriptor, error) {
	parsed, err := name.ParseReference(ref)
	if err != nil {
		return nil, fmt.Errorf("parsing reference %q: %v", ref, err)
	}
	return remote.Get(parsed, remote.WithContext(ctx))
}

View File

@@ -0,0 +1,38 @@
package download
import (
"context"
"testing"
)
// TestCmd exercises the download command end to end.
//
// NOTE(review): the fixture reference targets localhost:3000, so this test
// only passes when a registry serving hauler/file.txt:latest is already
// running there — it will fail in an isolated CI environment. Consider
// guarding it (testing.Short) or backing it with a local test registry.
func TestCmd(t *testing.T) {
	ctx := context.Background()

	// Inputs forwarded verbatim to download.Cmd.
	type args struct {
		ctx       context.Context
		o         *Opts
		reference string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "should work",
			args: args{
				ctx: ctx,
				// Empty DestinationDir downloads into the current directory.
				o:         &Opts{DestinationDir: ""},
				reference: "localhost:3000/hauler/file.txt:latest",
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := Cmd(tt.args.ctx, tt.args.o, tt.args.reference); (err != nil) != tt.wantErr {
				t.Errorf("Cmd() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

286
cmd/hauler/cli/store.go Normal file
View File

@@ -0,0 +1,286 @@
package cli
import (
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/cmd/hauler/cli/store"
)
// addStore wires the `store` command group and all of its subcommands onto
// parent. The bare `store` command just prints usage.
func addStore(parent *cobra.Command) {
	storeCmd := &cobra.Command{
		Use:   "store",
		Short: "Interact with hauler's embedded content store",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	subcommands := []*cobra.Command{
		addStoreSync(),
		addStoreExtract(),
		addStoreLoad(),
		addStoreSave(),
		addStoreServe(),
		addStoreList(),
		addStoreCopy(),
		// TODO: Remove this in favor of sync?
		addStoreAdd(),
	}
	storeCmd.AddCommand(subcommands...)

	parent.AddCommand(storeCmd)
}
// addStoreExtract builds the `store extract` (alias `x`) subcommand, which
// pulls a single store reference out to disk.
func addStoreExtract() *cobra.Command {
	opts := &store.ExtractOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.ExtractCmd(ctx, opts, s, args[0])
	}

	cmd := &cobra.Command{
		Use:     "extract",
		Short:   "Extract content from the store to disk",
		Aliases: []string{"x"},
		Args:    cobra.ExactArgs(1),
		RunE:    run,
	}
	opts.AddArgs(cmd)
	return cmd
}
// addStoreSync builds the `store sync` subcommand, which repopulates the
// store from content manifest files.
func addStoreSync() *cobra.Command {
	opts := &store.SyncOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.SyncCmd(ctx, opts, s)
	}

	cmd := &cobra.Command{
		Use:   "sync",
		Short: "Sync content to the embedded content store",
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreLoad builds the `store load` subcommand, which unpacks one or more
// store archives into the store's data directory.
func addStoreLoad() *cobra.Command {
	opts := &store.LoadOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		// Every positional argument is an archive to load.
		return store.LoadCmd(ctx, opts, s.DataDir, args...)
	}

	cmd := &cobra.Command{
		Use:   "load",
		Short: "Load a content store from a store archive",
		Args:  cobra.MinimumNArgs(1),
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreServe builds the `store serve` subcommand, which exposes the store
// through an OCI distribution registry.
func addStoreServe() *cobra.Command {
	opts := &store.ServeOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.ServeCmd(ctx, opts, s)
	}

	cmd := &cobra.Command{
		Use:   "serve",
		Short: "Expose the content of a local store through an OCI compliant server",
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreSave builds the `store save` subcommand, which archives the store's
// data directory to a single compressed file.
func addStoreSave() *cobra.Command {
	opts := &store.SaveOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.SaveCmd(ctx, opts, opts.FileName, s.DataDir)
	}

	cmd := &cobra.Command{
		Use:   "save",
		Short: "Save a content store to a store archive",
		Args:  cobra.ExactArgs(0),
		RunE:  run,
	}
	opts.AddArgs(cmd)
	return cmd
}
// addStoreList builds the `store list` subcommand, which prints every
// reference currently in the store.
func addStoreList() *cobra.Command {
	opts := &store.ListOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.ListCmd(ctx, opts, s)
	}

	cmd := &cobra.Command{
		Use:   "list",
		Short: "List all content references in a store",
		Args:  cobra.ExactArgs(0),
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreCopy builds the `store copy` subcommand, which relocates all store
// contents to the registry given as the single positional argument.
func addStoreCopy() *cobra.Command {
	opts := &store.CopyOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.CopyCmd(ctx, opts, s, args[0])
	}

	cmd := &cobra.Command{
		Use:   "copy",
		Short: "Copy all store contents to another OCI registry",
		Args:  cobra.ExactArgs(1),
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreAdd builds the `store add` command group (file/image/chart).
// The bare `add` command just prints usage.
func addStoreAdd() *cobra.Command {
	addCmd := &cobra.Command{
		Use:   "add",
		Short: "Add content to store",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	for _, sub := range []*cobra.Command{
		addStoreAddFile(),
		addStoreAddImage(),
		addStoreAddChart(),
	} {
		addCmd.AddCommand(sub)
	}

	return addCmd
}
// addStoreAddFile builds the `store add file` subcommand.
func addStoreAddFile() *cobra.Command {
	opts := &store.AddFileOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.AddFileCmd(ctx, opts, s, args[0])
	}

	cmd := &cobra.Command{
		Use:   "file",
		Short: "Add a file to the content store",
		Args:  cobra.ExactArgs(1),
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreAddImage builds the `store add image` subcommand.
func addStoreAddImage() *cobra.Command {
	opts := &store.AddImageOpts{}

	run := func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		s, err := ro.getStore(ctx)
		if err != nil {
			return err
		}
		return store.AddImageCmd(ctx, opts, s, args[0])
	}

	cmd := &cobra.Command{
		Use:   "image",
		Short: "Add an image to the content store",
		Args:  cobra.ExactArgs(1),
		RunE:  run,
	}
	opts.AddFlags(cmd)
	return cmd
}
// addStoreAddChart builds the `store add chart` subcommand.
// Fixes the first Example line, which omitted the `chart` subcommand word
// and therefore demonstrated an invocation that would not run.
func addStoreAddChart() *cobra.Command {
	o := &store.AddChartOpts{}

	cmd := &cobra.Command{
		Use:   "chart",
		Short: "Add a chart to the content store",
		Example: `
# add a chart
hauler store add chart longhorn --repo "https://charts.longhorn.io"
# add a specific version of a chart
hauler store add chart rancher --repo "https://releases.rancher.com/server-charts/latest" --version "2.6.2"
`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()

			s, err := ro.getStore(ctx)
			if err != nil {
				return err
			}

			return store.AddChartCmd(ctx, o, s, args[0])
		},
	}
	o.AddFlags(cmd)

	return cmd
}

179
cmd/hauler/cli/store/add.go Normal file
View File

@@ -0,0 +1,179 @@
package store
import (
"context"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/content/chart"
"github.com/rancherfederal/hauler/pkg/content/file"
"github.com/rancherfederal/hauler/pkg/content/image"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// AddFileOpts holds cli flags for `hauler store add file`.
type AddFileOpts struct {
	// Name optionally overrides the name the file is stored under; when empty
	// the base of the file reference is used (see storeFile).
	Name string
}

// AddFlags registers the file-add flags on cmd.
func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	f.StringVarP(&o.Name, "name", "n", "", "(Optional) Name to assign to file in store")
}
// AddFileCmd stores a single file reference in the store, optionally under
// the name given in o. Fixes the debug message, which read `store add`
// instead of `store add file` (inconsistent with the image/chart siblings).
func AddFileCmd(ctx context.Context, o *AddFileOpts, s *store.Store, reference string) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running cli command `hauler store add file`")

	s.Open()
	defer s.Close()

	cfg := v1alpha1.File{
		Ref:  reference,
		Name: o.Name,
	}

	return storeFile(ctx, s, cfg)
}
// storeFile adds a file content to the store. When fi.Name is empty it falls
// back to the base of the file reference (and warns about it).
func storeFile(ctx context.Context, s *store.Store, fi v1alpha1.File) error {
	lgr := log.FromContext(ctx)

	if fi.Name == "" {
		// Was computed twice (once into `base`, once again for the
		// assignment); reuse the single computation.
		base := filepath.Base(fi.Ref)
		fi.Name = base
		lgr.Warnf("no name specified for file reference [%s], using base filepath: [%s]", fi.Ref, base)
	}

	oci, err := file.NewFile(fi.Ref, fi.Name)
	if err != nil {
		return err
	}

	// Empty default registry keeps the stored reference registry-less.
	ref, err := name.ParseReference(fi.Name, name.WithDefaultRegistry(""))
	if err != nil {
		return err
	}

	desc, err := s.AddArtifact(ctx, oci, ref)
	if err != nil {
		return err
	}

	lgr.Infof("added file [%s] to store at [%s] with manifest digest [%s]", fi.Ref, ref.Name(), desc.Digest.String())
	return nil
}
// AddImageOpts holds cli flags for `hauler store add image`.
type AddImageOpts struct {
	// Name is declared but not wired to any flag below and not read by
	// AddImageCmd — NOTE(review): possibly dead, or reserved for future use.
	Name string
}

// AddFlags is a placeholder; no image-add flags are registered yet.
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	_ = f
}
// AddImageCmd stores a single image reference in the content store.
func AddImageCmd(ctx context.Context, o *AddImageOpts, s *store.Store, reference string) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running cli command `hauler store add image`")

	s.Open()
	defer s.Close()

	return storeImage(ctx, s, v1alpha1.Image{Ref: reference})
}
// storeImage adds a remote image reference to the store as oci content.
func storeImage(ctx context.Context, s *store.Store, i v1alpha1.Image) error {
	lgr := log.FromContext(ctx)

	oci, err := image.NewImage(i.Ref)
	if err != nil {
		return err
	}

	parsed, err := name.ParseReference(i.Ref)
	if err != nil {
		return err
	}

	desc, err := s.AddArtifact(ctx, oci, parsed)
	if err != nil {
		return err
	}

	lgr.Infof("added image [%s] to store at [%s] with manifest digest [%s]", i.Ref, parsed.Context().RepositoryStr(), desc.Digest.String())
	return nil
}
// AddChartOpts holds cli flags for `hauler store add chart`.
// Only RepoURL and Version are wired to flags below; the remaining fields are
// placeholders for future helm auth/TLS support and are not read anywhere in
// this file.
type AddChartOpts struct {
	Version string
	RepoURL string

	// TODO: Support helm auth
	Username              string
	Password              string
	PassCredentialsAll    bool
	CertFile              string
	KeyFile               string
	CaFile                string
	InsecureSkipTLSverify bool
	RepositoryConfig      string
	RepositoryCache       string
}

// AddFlags registers the chart-add flags on cmd.
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	f.StringVarP(&o.RepoURL, "repo", "r", "", "Chart repository URL")
	f.StringVar(&o.Version, "version", "", "(Optional) Version of the chart to download, defaults to latest if not specified")
}
// AddChartCmd stores a helm chart (by name, repo, and optional version) in
// the content store.
func AddChartCmd(ctx context.Context, o *AddChartOpts, s *store.Store, chartName string) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running cli command `hauler store add chart`")

	s.Open()
	defer s.Close()

	return storeChart(ctx, s, v1alpha1.Chart{
		Name:    chartName,
		RepoURL: o.RepoURL,
		Version: o.Version,
	})
}
// storeChart adds a helm chart to the store, tagging it with the chart
// version (or the library's default tag when no version was requested).
func storeChart(ctx context.Context, s *store.Store, ch v1alpha1.Chart) error {
	lgr := log.FromContext(ctx)

	oci, err := chart.NewChart(ch.Name, ch.RepoURL, ch.Version)
	if err != nil {
		return err
	}

	tag := ch.Version
	if tag == "" {
		tag = name.DefaultTag
	}

	parsed, err := name.ParseReference(ch.Name, name.WithDefaultRegistry(""), name.WithDefaultTag(tag))
	if err != nil {
		return err
	}

	desc, err := s.AddArtifact(ctx, oci, parsed)
	if err != nil {
		return err
	}

	lgr.Infof("added chart [%s] to store at [%s:%s] with manifest digest [%s]", ch.Name, parsed.Context().RepositoryStr(), parsed.Identifier(), desc.Digest.String())
	return nil
}

View File

@@ -0,0 +1,58 @@
package store
import (
"context"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// CopyOpts holds cli flags for `hauler store copy`. There are none yet.
type CopyOpts struct{}

// AddFlags is a placeholder; no copy flags are registered yet.
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	_ = f
	// TODO: Regex matching
}
// CopyCmd copies every piece of content in the store to another OCI registry,
// preserving each reference's repository path and identifier.
// Fixes a variable shadow: the fetched image was assigned to `o`, hiding the
// *CopyOpts parameter of the same name.
func CopyCmd(ctx context.Context, o *CopyOpts, s *store.Store, registry string) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running cli command `hauler store copy`")

	s.Open()
	defer s.Close()

	refs, err := s.List(ctx)
	if err != nil {
		return err
	}

	for _, r := range refs {
		// Source: the reference within the embedded store's registry.
		srcRef, err := name.ParseReference(r, name.WithDefaultRegistry(s.Registry()))
		if err != nil {
			return err
		}

		img, err := remote.Image(srcRef)
		if err != nil {
			return err
		}

		// Destination: the same reference rooted at the target registry.
		dstRef, err := name.ParseReference(r, name.WithDefaultRegistry(registry))
		if err != nil {
			return err
		}

		lgr.Infof("relocating [%s] -> [%s]", srcRef.Name(), dstRef.Name())
		if err := remote.Write(dstRef, img); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -0,0 +1,41 @@
package store
import (
"context"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/cmd/hauler/cli/download"
"github.com/rancherfederal/hauler/pkg/layout"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// ExtractOpts holds cli flags for `hauler store extract`.
type ExtractOpts struct {
	// DestinationDir is where extracted content is written; empty means the
	// current directory (per the flag help below).
	DestinationDir string
}

// AddArgs registers the extract flags on cmd.
func (o *ExtractOpts) AddArgs(cmd *cobra.Command) {
	f := cmd.Flags()
	f.StringVar(&o.DestinationDir, "dir", "", "Directory to save contents to (defaults to current directory)")
}
// ExtractCmd relocates a reference to the store's own registry address and
// delegates to the download command to materialize it on disk.
func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Store, reference string) error {
	l := log.FromContext(ctx)
	l.Debugf("running command `hauler store extract`")

	s.Open()
	defer s.Close()

	eref, err := layout.RelocateReference(reference, s.Registry())
	if err != nil {
		return err
	}

	return download.Cmd(ctx, &download.Opts{DestinationDir: o.DestinationDir}, eref.Name())
}

View File

@@ -0,0 +1,52 @@
package store
import (
"context"
"fmt"
"os"
"text/tabwriter"
"github.com/google/go-containerregistry/pkg/name"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// ListOpts holds cli flags for `hauler store list`. There are none yet.
type ListOpts struct{}

// AddFlags is a placeholder; no list flags are registered yet.
func (o *ListOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	_ = f
	// TODO: Regex matching
}
// ListCmd prints a table of every content reference currently in the store,
// one row per reference, to stdout.
func ListCmd(ctx context.Context, o *ListOpts, s *store.Store) error {
	lgr := log.FromContext(ctx)
	lgr.Debugf("running cli command `hauler store list`")

	s.Open()
	defer s.Close()

	refs, err := s.List(ctx)
	if err != nil {
		return err
	}

	// TODO: Just use a tabler library
	tw := tabwriter.NewWriter(os.Stdout, 8, 12, 4, '\t', 0)
	defer tw.Flush()

	fmt.Fprintf(tw, "#\tReference\tIdentifier\n")
	for i, r := range refs {
		parsed, err := name.ParseReference(r, name.WithDefaultRegistry(""))
		if err != nil {
			return err
		}
		fmt.Fprintf(tw, "%d\t%s\t%s\n", i, parsed.Context().String(), parsed.Identifier())
	}
	return nil
}

View File

@@ -0,0 +1,38 @@
package store
import (
"context"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
)
// LoadOpts holds cli flags for `hauler store load`. There are none yet.
type LoadOpts struct{}

// AddFlags is a placeholder; no load flags are registered yet.
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	_ = f
}
// LoadCmd unpacks one or more tar.zst store archives into dir, overwriting
// any existing files.
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func LoadCmd(ctx context.Context, o *LoadOpts, dir string, archiveRefs ...string) error {
	l := log.FromContext(ctx)
	l.Debugf("running command `hauler store load`")

	// TODO: Support more formats?
	unarchiver := archiver.NewTarZstd()
	unarchiver.OverwriteExisting = true

	for _, archiveRef := range archiveRefs {
		l.Infof("Loading content from %s to %s", archiveRef, dir)
		if err := unarchiver.Unarchive(archiveRef, dir); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -0,0 +1,55 @@
package store
import (
"context"
"os"
"path/filepath"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
)
// SaveOpts holds cli flags for `hauler store save`.
type SaveOpts struct {
	// FileName is the name of the archive to write (default pkg.tar.zst).
	FileName string
}

// AddArgs registers the save flags on cmd.
func (o *SaveOpts) AddArgs(cmd *cobra.Command) {
	f := cmd.Flags()
	f.StringVarP(&o.FileName, "filename", "f", "pkg.tar.zst", "Name of archive")
}
// SaveCmd archives the store's data directory (dir) into a compressed
// tar.zst at outputFile. The archive is created from inside dir so entries
// are store-relative rather than absolute.
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func SaveCmd(ctx context.Context, o *SaveOpts, outputFile string, dir string) error {
	l := log.FromContext(ctx)
	l.Debugf("running command `hauler store save`")

	// TODO: Support more formats?
	a := archiver.NewTarZstd()
	a.OverwriteExisting = true

	// Resolve the output path before chdir'ing so a relative filename still
	// lands where the caller expects.
	absOutputfile, err := filepath.Abs(outputFile)
	if err != nil {
		return err
	}

	l.Infof("Saving data dir (%s) as compressed archive to %s", dir, absOutputfile)

	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	// Restore the working directory on exit; best effort, since the process
	// is about to finish the command anyway (error intentionally discarded,
	// now made explicit instead of `defer os.Chdir(cwd)`).
	defer func() { _ = os.Chdir(cwd) }()

	if err := os.Chdir(dir); err != nil {
		return err
	}

	return a.Archive([]string{"."}, absOutputfile)
}

View File

@@ -0,0 +1,85 @@
package store
import (
"context"
"fmt"
"net/http"
"os"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/registry"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// ServeOpts holds cli flags for `hauler store serve`.
type ServeOpts struct {
	// Port the registry listens on (default 5000).
	Port int
	// ConfigFile, when set, completely replaces the generated default config.
	ConfigFile string
	// storedir is never assigned or read in this file — NOTE(review):
	// possibly dead; confirm against the rest of the package.
	storedir string
}

// AddFlags registers the serve flags on cmd.
func (o *ServeOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	f.IntVarP(&o.Port, "port", "p", 5000, "Port to listen on")
	f.StringVarP(&o.ConfigFile, "config", "c", "", "Path to a config file, will override all other configs")
}
// ServeCmd runs an OCI distribution registry that serves the store's
// contents. A user-supplied config file takes precedence over the generated
// defaults.
func ServeCmd(ctx context.Context, o *ServeOpts, s *store.Store) error {
	l := log.FromContext(ctx)
	l.Debugf("running command `hauler store serve`")

	cfg := o.defaultConfig(s)
	if o.ConfigFile != "" {
		userCfg, err := loadConfig(o.ConfigFile)
		if err != nil {
			return err
		}
		cfg = userCfg
	}

	r, err := registry.NewRegistry(ctx, cfg)
	if err != nil {
		return err
	}

	l.Infof("Starting registry listening on :%d", o.Port)
	return r.ListenAndServe()
}
// loadConfig parses a distribution registry configuration from filename.
// Fixes a resource leak: the opened file handle was never closed.
func loadConfig(filename string) (*configuration.Configuration, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return configuration.Parse(f)
}
// defaultConfig builds a read-only, filesystem-backed registry configuration
// rooted at the store's data directory, listening on the configured port.
func (o *ServeOpts) defaultConfig(s *store.Store) *configuration.Configuration {
	storage := configuration.Storage{
		"cache":      configuration.Parameters{"blobdescriptor": "inmemory"},
		"filesystem": configuration.Parameters{"rootdirectory": s.DataDir},
		// TODO: Ensure this is toggleable via cli arg if necessary
		"maintenance": configuration.Parameters{"readonly.enabled": true},
	}

	cfg := &configuration.Configuration{
		Version: "0.1",
		Storage: storage,
	}
	cfg.Log.Level = "info"
	cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
	cfg.HTTP.Headers = http.Header{
		"X-Content-Type-Options": []string{"nosniff"},
	}
	return cfg
}

View File

@@ -0,0 +1,154 @@
package store
import (
"bufio"
"context"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/yaml"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/collection/chart"
"github.com/rancherfederal/hauler/pkg/collection/k3s"
"github.com/rancherfederal/hauler/pkg/content"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// SyncOpts holds cli flags for `hauler store sync`.
type SyncOpts struct {
	// ContentFiles are paths to yaml manifests describing content/collections
	// to sync into the store.
	ContentFiles []string
}

// AddFlags registers the sync flags on cmd.
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
	f := cmd.Flags()
	f.StringSliceVarP(&o.ContentFiles, "files", "f", []string{}, "Path to content files")
}
// SyncCmd flushes the store and repopulates it from the given content files.
// Each file may contain multiple yaml documents; every document is dispatched
// by kind to the matching content handler (files/images/charts) or collection
// handler (k3s/thick charts).
//
// Fixes relative to the original: the per-file handle was opened inside the
// loop and never closed (leak — now handled by readContentDocs), and the
// thick-charts loop variable shadowed its own unmarshalled `cfg`.
func SyncCmd(ctx context.Context, o *SyncOpts, s *store.Store) error {
	l := log.FromContext(ctx)
	l.Debugf("running cli command `hauler store sync`")

	// Start from an empty store (contents are cached elsewhere)
	l.Debugf("flushing any existing content in store: %s", s.DataDir)
	if err := s.Flush(ctx); err != nil {
		return err
	}

	s.Open()
	defer s.Close()

	for _, filename := range o.ContentFiles {
		l.Debugf("processing content file: '%s'", filename)

		docs, err := readContentDocs(filename)
		if err != nil {
			return err
		}

		for _, doc := range docs {
			obj, err := content.Load(doc)
			if err != nil {
				return err
			}

			l.Infof("syncing [%s] to [%s]", obj.GroupVersionKind().String(), s.DataDir)

			// TODO: Should type switch instead...
			switch obj.GroupVersionKind().Kind {
			case v1alpha1.FilesContentKind:
				var cfg v1alpha1.Files
				if err := yaml.Unmarshal(doc, &cfg); err != nil {
					return err
				}
				for _, f := range cfg.Spec.Files {
					if err := storeFile(ctx, s, f); err != nil {
						return err
					}
				}

			case v1alpha1.ImagesContentKind:
				var cfg v1alpha1.Images
				if err := yaml.Unmarshal(doc, &cfg); err != nil {
					return err
				}
				for _, i := range cfg.Spec.Images {
					if err := storeImage(ctx, s, i); err != nil {
						return err
					}
				}

			case v1alpha1.ChartsContentKind:
				var cfg v1alpha1.Charts
				if err := yaml.Unmarshal(doc, &cfg); err != nil {
					return err
				}
				for _, ch := range cfg.Spec.Charts {
					if err := storeChart(ctx, s, ch); err != nil {
						return err
					}
				}

			case v1alpha1.K3sCollectionKind:
				var cfg v1alpha1.K3s
				if err := yaml.Unmarshal(doc, &cfg); err != nil {
					return err
				}
				k, err := k3s.NewK3s(cfg.Spec.Version)
				if err != nil {
					return err
				}
				if _, err := s.AddCollection(ctx, k); err != nil {
					return err
				}

			case v1alpha1.ChartsCollectionKind:
				var cfg v1alpha1.ThickCharts
				if err := yaml.Unmarshal(doc, &cfg); err != nil {
					return err
				}
				// Loop variable renamed from `cfg`, which shadowed the
				// unmarshalled config above.
				for _, c := range cfg.Spec.Charts {
					tc, err := chart.NewChart(c.Name, c.RepoURL, c.Version)
					if err != nil {
						return err
					}
					if _, err := s.AddCollection(ctx, tc); err != nil {
						return err
					}
				}

			default:
				return fmt.Errorf("unrecognized content/collection type: %s", obj.GroupVersionKind().String())
			}
		}
	}
	return nil
}

// readContentDocs reads every yaml document from filename, closing the file
// when done.
func readContentDocs(filename string) ([][]byte, error) {
	fi, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer fi.Close()

	reader := yaml.NewYAMLReader(bufio.NewReader(fi))

	var docs [][]byte
	for {
		raw, err := reader.Read()
		if err == io.EOF {
			return docs, nil
		}
		if err != nil {
			return nil, err
		}
		docs = append(docs, raw)
	}
}

View File

@@ -1,15 +1,21 @@
package main
import (
"log"
"context"
"os"
"github.com/rancherfederal/hauler/cmd/hauler/app"
"github.com/rancherfederal/hauler/cmd/hauler/cli"
"github.com/rancherfederal/hauler/pkg/log"
)
func main() {
root := app.NewRootCommand()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if err := root.Execute(); err != nil {
log.Fatalln(err)
logger := log.NewLogger(os.Stdout)
ctx = logger.WithContext(ctx)
if err := cli.New().ExecuteContext(ctx); err != nil {
logger.Errorf("%v", err)
}
}

214
go.mod
View File

@@ -1,70 +1,162 @@
module github.com/rancherfederal/hauler
go 1.16
go 1.17
require (
cloud.google.com/go/storage v1.8.0 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/containerd/containerd v1.5.0-beta.4
github.com/deislabs/oras v0.11.1
github.com/docker/docker v20.10.6+incompatible // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/google/go-containerregistry v0.5.1
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.12
github.com/klauspost/compress v1.13.0 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/containerd/containerd v1.5.7
github.com/distribution/distribution/v3 v3.0.0-20210926092439-1563384b69df
github.com/google/go-containerregistry v0.6.0
github.com/mholt/archiver/v3 v3.5.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/otiai10/copy v1.6.0
github.com/pterm/pterm v0.12.24
github.com/rancher/fleet v0.3.5
github.com/rancher/fleet/pkg/apis v0.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/rancher/wrangler v0.8.4
github.com/rs/zerolog v1.26.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/afero v1.6.0
github.com/spf13/cobra v1.1.3
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
golang.org/x/tools v0.1.3 // indirect
google.golang.org/genproto v0.0.0-20210524171403-669157292da3 // indirect
google.golang.org/grpc v1.38.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
helm.sh/helm/v3 v3.5.1
k8s.io/apimachinery v0.21.1
k8s.io/cli-runtime v0.20.2
k8s.io/client-go v11.0.1-0.20190816222228-6d55c1b1f1ca+incompatible
sigs.k8s.io/cli-utils v0.23.1
sigs.k8s.io/controller-runtime v0.9.0
sigs.k8s.io/yaml v1.2.0
github.com/spf13/cobra v1.2.1
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
helm.sh/helm/v3 v3.7.1
k8s.io/apimachinery v0.22.2
k8s.io/client-go v0.22.2
oras.land/oras-go v0.4.0
sigs.k8s.io/controller-runtime v0.10.3
)
replace (
github.com/rancher/fleet/pkg/apis v0.0.0 => github.com/rancher/fleet/pkg/apis v0.0.0-20210604212701-3a76c78716ab
helm.sh/helm/v3 => github.com/rancher/helm/v3 v3.3.3-fleet1
k8s.io/api => k8s.io/api v0.20.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.2 // indirect
k8s.io/apimachinery => k8s.io/apimachinery v0.20.2 // indirect
k8s.io/apiserver => k8s.io/apiserver v0.20.2
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.2
k8s.io/client-go => github.com/rancher/client-go v0.20.0-fleet1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.2
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.2
k8s.io/code-generator => k8s.io/code-generator v0.20.2
k8s.io/component-base => k8s.io/component-base v0.20.2
k8s.io/component-helpers => k8s.io/component-helpers v0.20.2
k8s.io/controller-manager => k8s.io/controller-manager v0.20.2
k8s.io/cri-api => k8s.io/cri-api v0.20.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.2
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.2
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.2
k8s.io/kubectl => k8s.io/kubectl v0.20.2
k8s.io/kubelet => k8s.io/kubelet v0.20.2
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.2
k8s.io/metrics => k8s.io/metrics v0.20.2
k8s.io/mount-utils => k8s.io/mount-utils v0.20.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.2
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
github.com/Masterminds/squirrel v1.5.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
github.com/andybalholm/brotli v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.7.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v20.10.7+incompatible // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v20.10.7+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/evanphx/json-patch v4.11.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.3 // indirect
github.com/gomodule/redigo v1.8.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.2.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/handlers v1.5.1 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.3.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/klauspost/compress v1.13.0 // indirect
github.com/klauspost/pgzip v1.2.4 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.4 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/copystructure v1.1.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode v1.1.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pierrec/lz4/v4 v4.0.3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08 // indirect
github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc // indirect
github.com/russross/blackfriday v1.5.2 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/testify v1.7.0 // indirect
github.com/ulikunitz/xz v0.5.7 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 // indirect
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
google.golang.org/grpc v1.38.0 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/gorp.v1 v1.7.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.22.2 // indirect
k8s.io/apiextensions-apiserver v0.22.2 // indirect
k8s.io/apiserver v0.22.2 // indirect
k8s.io/cli-runtime v0.22.1 // indirect
k8s.io/component-base v0.22.2 // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect
k8s.io/kubectl v0.22.1 // indirect
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
sigs.k8s.io/kustomize/api v0.8.11 // indirect
sigs.k8s.io/kustomize/kyaml v0.11.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

1295
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,44 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Kind names used for the chart content type and the thick-chart collection type.
const (
ChartsContentKind = "Charts"
ChartsCollectionKind = "ThickCharts"
)
// Charts is the content-level API object listing helm charts to fetch and store.
type Charts struct {
*metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the list of charts to collect.
Spec ChartSpec `json:"spec,omitempty"`
}
// ChartSpec is the desired set of charts.
type ChartSpec struct {
Charts []Chart `json:"charts,omitempty"`
}
// Chart identifies a single helm chart by name, repository URL, and version.
type Chart struct {
Name string `json:"name"`
RepoURL string `json:"repoURL"`
Version string `json:"version"`
}
// ThickCharts is the collection-level API object ("ThickCharts" kind) listing
// charts that should be stored together with their dependent images.
type ThickCharts struct {
	*metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the list of thick charts to collect.
	// BUG FIX: this previously referenced ChartSpec, which left ThickChartSpec
	// entirely unused; it now uses the spec type declared for this kind.
	Spec ThickChartSpec `json:"spec,omitempty"`
}

// ThickChartSpec is the desired set of thick charts.
type ThickChartSpec struct {
	ThickCharts []ThickChart `json:"charts,omitempty"`
}

// ThickChart identifies a single chart by name, repository URL, and version.
type ThickChart struct {
	Name    string `json:"name"`
	RepoURL string `json:"repoURL"`
	Version string `json:"version"`
}

View File

@@ -1,91 +1,21 @@
package v1alpha1
import (
"sigs.k8s.io/cli-utils/pkg/object"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Drive interface {
Images() ([]string, error)
BinURL() string
const (
DriverContentKind = "Driver"
)
LibPath() string
EtcPath() string
Config() (*map[string]interface{}, error)
SystemObjects() (objs []object.ObjMetadata)
// Driver is the content API object describing a cluster driver.
type Driver struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DriverSpec `json:"spec"`
}
// NOTE(review): the next two lines look like diff-rendering residue — a
// duplicate, unterminated `type Driver struct {` declaration. Confirm against
// the actual file before relying on this span.
//Driver
type Driver struct {
// DriverSpec selects the driver implementation (e.g. "k3s") and its version.
type DriverSpec struct {
Type string `json:"type"`
Version string `json:"version"`
}
////TODO: Don't hardcode this
//func (k k3s) BinURL() string {
// return "https://github.com/k3s-io/k3s/releases/download/v1.21.1%2Bk3s1/k3s"
//}
//
//func (k k3s) PackageImages() ([]string, error) {
// //TODO: Replace this with a query to images.txt on release page
// return []string{
// "docker.io/rancher/coredns-coredns:1.8.3",
// "docker.io/rancher/klipper-helm:v0.5.0-build20210505",
// "docker.io/rancher/klipper-lb:v0.2.0",
// "docker.io/rancher/library-busybox:1.32.1",
// "docker.io/rancher/library-traefik:2.4.8",
// "docker.io/rancher/local-path-provisioner:v0.0.19",
// "docker.io/rancher/metrics-server:v0.3.6",
// "docker.io/rancher/pause:3.1",
// }, nil
//}
//
//func (k k3s) Config() (*map[string]interface{}, error) {
// // TODO: This should be typed
// c := make(map[string]interface{})
// c["write-kubeconfig-mode"] = "0644"
//
// //TODO: Add uid or something to ensure this works for multi-node setups
// c["node-name"] = "hauler"
//
// return &c, nil
//}
//
//func (k k3s) SystemObjects() (objs []object.ObjMetadata) {
// //TODO: Make sure this matches up with specified config disables
// for _, dep := range []string{"coredns", "local-path-provisioner", "metrics-server"} {
// objMeta, _ := object.CreateObjMetadata("kube-system", dep, schema.GroupKind{Kind: "Deployment", Group: "apps"})
// objs = append(objs, objMeta)
// }
// return objs
//}
//
//func (k k3s) LibPath() string { return "/var/lib/rancher/k3s" }
//func (k k3s) EtcPath() string { return "/etc/rancher/k3s" }
//
////TODO: Implement rke2 as a driver
//type rke2 struct{}
//
//func (r rke2) PackageImages() ([]string, error) { return []string{}, nil }
//func (r rke2) BinURL() string { return "" }
//func (r rke2) LibPath() string { return "" }
//func (r rke2) EtcPath() string { return "" }
//func (r rke2) Config() (*map[string]interface{}, error) { return nil, nil }
//func (r rke2) SystemObjects() (objs []object.ObjMetadata) { return objs }
//
////NewDriver will return the appropriate driver given a kind, defaults to k3s
//func NewDriver(kind string) Drive {
// var d Drive
// switch kind {
// case "rke2":
// //TODO
// d = rke2{}
//
// default:
// d = k3s{
// dataDir: "/var/lib/rancher/k3s",
// etcDir: "/etc/rancher/k3s",
// }
// }
//
// return d
//}

View File

@@ -0,0 +1,23 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FilesContentKind is the API kind name for file content.
const FilesContentKind = "Files"
// Files is the content-level API object listing files to fetch and store.
type Files struct {
*metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the list of files to collect.
Spec FileSpec `json:"spec,omitempty"`
}
// FileSpec is the desired set of files.
type FileSpec struct {
Files []File `json:"files,omitempty"`
}
// File references a single file by source ref, with an optional local name.
type File struct {
Ref string `json:"ref"`
Name string `json:"name,omitempty"`
}

View File

@@ -1,32 +0,0 @@
package v1alpha1
import (
"fmt"
"strings"
)
//Fleet is used as the deployment engine for all things Hauler
type Fleet struct {
	//Version of fleet to package and use in deployment
	Version string `json:"version"`
}

// Images returns the container images required by this fleet version.
//TODO: These should be identified from the chart version
func (f Fleet) Images() ([]string, error) {
	return []string{
		// gitjob is pinned; fmt.Sprintf with no format verbs was replaced by a
		// plain literal (flagged by go vet).
		"rancher/gitjob:v0.1.15",
		fmt.Sprintf("rancher/fleet:%s", f.Version),
		fmt.Sprintf("rancher/fleet-agent:%s", f.Version),
	}, nil
}

// CRDChart returns the download URL for the fleet CRD chart of this version.
func (f Fleet) CRDChart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/%s/fleet-crd-%s.tgz", f.Version, f.VLess())
}

// Chart returns the download URL for the fleet chart of this version.
func (f Fleet) Chart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/%s/fleet-%s.tgz", f.Version, f.VLess())
}

// VLess strips a leading "v" from the version (e.g. "v0.3.5" -> "0.3.5").
// TrimPrefix replaces ReplaceAll so any "v" appearing elsewhere in the
// version string (pre-release tags etc.) is preserved.
func (f Fleet) VLess() string {
	return strings.TrimPrefix(f.Version, "v")
}

View File

@@ -0,0 +1,19 @@
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
// API version and group names for hauler's content and collection APIs.
const (
Version = "v1alpha1"
ContentGroup = "content.hauler.cattle.io"
CollectionGroup = "collection.hauler.cattle.io"
)
var (
// ContentGroupVersion is content.hauler.cattle.io/v1alpha1.
ContentGroupVersion = schema.GroupVersion{Group: ContentGroup, Version: Version}
// SchemeBuilder registers types for the content group only.
// NOTE(review): no builder is declared for the collection group — confirm
// whether collection types are meant to be registered elsewhere.
SchemeBuilder = &scheme.Builder{GroupVersion: ContentGroupVersion}
// CollectionGroupVersion is collection.hauler.cattle.io/v1alpha1.
CollectionGroupVersion = schema.GroupVersion{Group: CollectionGroup, Version: Version}
)

View File

@@ -0,0 +1,22 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImagesContentKind is the API kind name for image content.
const ImagesContentKind = "Images"
// Images is the content-level API object listing container images to store.
type Images struct {
*metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the list of image references.
Spec ImageSpec `json:"spec,omitempty"`
}
// ImageSpec is the desired set of images.
type ImageSpec struct {
Images []Image `json:"images,omitempty"`
}
// Image references a single container image by reference string.
type Image struct {
Ref string `json:"ref"`
}

View File

@@ -0,0 +1,17 @@
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// K3sCollectionKind is the API kind name for the k3s collection.
const K3sCollectionKind = "K3s"
// K3s is the collection-level API object describing a k3s distribution to collect.
type K3s struct {
*metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec selects the k3s release.
Spec K3sSpec `json:"spec,omitempty"`
}
// K3sSpec pins a k3s version (or channel name) and target architecture.
type K3sSpec struct {
Version string `json:"version"`
Arch string `json:"arch"`
}

View File

@@ -1,53 +0,0 @@
package v1alpha1
import (
"os"
"path/filepath"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// Well-known directory and file names inside a hauler package on disk.
const (
BundlesDir = "bundles"
LayoutDir = "layout"
BinDir = "bin"
ChartDir = "charts"
PackageFile = "package.json"
)
// Package is the serialized description of a hauler package.
type Package struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PackageSpec `json:"spec"`
}
// PackageSpec describes what a package contains and how it is deployed.
type PackageSpec struct {
// Fleet configures the fleet deployment engine bundled with the package.
Fleet Fleet `json:"fleet"`
// Driver selects the cluster driver (type/version) to run.
Driver Driver `json:"driver"`
// Paths is the list of directories relative to the working directory contains all resources to be bundled.
// path globbing is supported, for example [ "charts/*" ] will match all folders as a subdirectory of charts/
// If empty, "/" is the default
Paths []string `json:"paths,omitempty"`
// Images is the list of container image references to bundle.
Images []string `json:"images,omitempty"`
}
//LoadPackageFromDir will load an existing package from a directory on disk, it fails if no PackageFile is found in dir
func LoadPackageFromDir(path string) (Package, error) {
	raw, readErr := os.ReadFile(filepath.Join(path, PackageFile))
	if readErr != nil {
		return Package{}, readErr
	}

	pkg := Package{}
	if parseErr := yaml.Unmarshal(raw, &pkg); parseErr != nil {
		return Package{}, parseErr
	}
	return pkg, nil
}

10
pkg/artifact/config.go Normal file
View File

@@ -0,0 +1,10 @@
package artifact
import v1 "github.com/google/go-containerregistry/pkg/v1"
// Config abstracts an OCI artifact's config blob.
type Config interface {
// Raw returns the config bytes
Raw() ([]byte, error)
// Descriptor returns the OCI descriptor (media type, digest, size) for the config.
Descriptor() (v1.Descriptor, error)
}

127
pkg/artifact/local/layer.go Normal file
View File

@@ -0,0 +1,127 @@
package local
import (
"io"
v1 "github.com/google/go-containerregistry/pkg/v1"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/artifact/types"
)
// Opener returns a fresh ReadCloser over a layer's content. It may be invoked
// multiple times, so the underlying source must be re-openable.
type Opener func() (io.ReadCloser, error)

// LayerFromOpener builds a v1.Layer from a re-openable content source,
// precomputing its digest, diffID, and size. Options may set the media type
// and annotations before hashing.
func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
	var err error

	layer := &layer{
		mediaType:   types.UnknownLayer,
		annotations: make(map[string]string, 1),
	}

	layer.uncompressedOpener = opener
	layer.compressedOpener = func() (io.ReadCloser, error) {
		rc, err := opener()
		if err != nil {
			return nil, err
		}
		// TODO: actually compress this
		return rc, nil
	}

	for _, opt := range opts {
		opt(layer)
	}

	// By OCI convention, Digest (and descriptor size) describe the compressed
	// form while DiffID hashes the uncompressed form. The original computed
	// these from the opposite openers; since the two openers currently yield
	// identical bytes (no compression yet) this is invisible today, but the
	// assignments are corrected so behavior stays right once compression lands.
	if layer.digest, layer.size, err = compute(layer.compressedOpener); err != nil {
		return nil, err
	}
	if layer.diffID, _, err = compute(layer.uncompressedOpener); err != nil {
		return nil, err
	}
	return layer, nil
}
// compute opens the content once and returns its SHA-256 hash and size.
func compute(opener Opener) (v1.Hash, int64, error) {
rc, err := opener()
if err != nil {
return v1.Hash{}, 0, err
}
defer rc.Close()
return v1.SHA256(rc)
}
// LayerOption mutates a layer during construction.
type LayerOption func(*layer)
// WithMediaType sets the layer's media type string.
func WithMediaType(mt string) LayerOption {
return func(l *layer) {
l.mediaType = mt
}
}
// WithAnnotations replaces the layer's annotation map wholesale.
// The previous nil-guard allocated a map that was immediately overwritten by
// the assignment below; that dead branch is removed. Note the caller's map is
// stored by reference, not copied.
func WithAnnotations(annotations map[string]string) LayerOption {
	return func(l *layer) {
		l.annotations = annotations
	}
}
// layer is a v1.Layer backed by re-openable content with precomputed hashes.
type layer struct {
digest v1.Hash
diffID v1.Hash
size int64
// compressedOpener and uncompressedOpener re-open the layer content;
// compression is not yet implemented, so both currently yield the same bytes.
compressedOpener Opener
uncompressedOpener Opener
mediaType string
annotations map[string]string
urls []string
}
// Descriptor assembles an OCI descriptor for this layer from its digest,
// media type, size, annotations, and URLs.
func (l layer) Descriptor() (*v1.Descriptor, error) {
	h, err := l.Digest()
	if err != nil {
		return nil, err
	}

	mediaType, mtErr := l.MediaType()
	if mtErr != nil {
		return nil, mtErr
	}

	desc := v1.Descriptor{
		MediaType:   mediaType,
		Size:        l.size,
		Digest:      h,
		Annotations: l.annotations,
		URLs:        l.urls,
		// TODO: Allow platforms
		Platform: nil,
	}
	return &desc, nil
}
// Digest returns the precomputed content hash.
func (l layer) Digest() (v1.Hash, error) {
return l.digest, nil
}
// DiffID returns the precomputed uncompressed-content hash.
func (l layer) DiffID() (v1.Hash, error) {
return l.diffID, nil
}
// Compressed opens the compressed content stream.
func (l layer) Compressed() (io.ReadCloser, error) {
return l.compressedOpener()
}
// Uncompressed opens the raw content stream.
func (l layer) Uncompressed() (io.ReadCloser, error) {
return l.uncompressedOpener()
}
// Size returns the precomputed content size in bytes.
func (l layer) Size() (int64, error) {
return l.size, nil
}
// MediaType returns the configured media type.
func (l layer) MediaType() (gtypes.MediaType, error) {
return gtypes.MediaType(l.mediaType), nil
}

24
pkg/artifact/oci.go Normal file
View File

@@ -0,0 +1,24 @@
package artifact
import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
)
// OCI is the bare minimum we need to represent an artifact in an oci layout
// At a high level, it is not constrained by an Image's config, manifests, and layer ordinality
// This specific implementation fully encapsulates v1.Layer's within a more generic form
type OCI interface {
// MediaType returns the manifest media type string for this artifact.
MediaType() string
// Manifest returns the artifact's OCI manifest.
Manifest() (*v1.Manifest, error)
// RawConfig returns the raw config blob bytes.
RawConfig() ([]byte, error)
// Layers returns the artifact's layers.
Layers() ([]v1.Layer, error)
}
// Collection is a named group of OCI artifacts.
type Collection interface {
// Contents returns the list of contents in the collection
Contents() (map[name.Reference]OCI, error)
}

View File

@@ -0,0 +1,26 @@
package types
// Media type strings used by hauler when storing content as OCI artifacts.
const (
OCIManifestSchema1 = "application/vnd.oci.image.manifest.v1+json"
DockerManifestSchema2 = "application/vnd.docker.distribution.manifest.v2+json"
DockerConfigJSON = "application/vnd.docker.container.image.v1+json"
// Fallback media types for content hauler cannot classify.
UnknownManifest = "application/vnd.hauler.cattle.io.unknown.v1+json"
UnknownLayer = "application/vnd.content.hauler.unknown.layer"
// Media types for generic file content and its config.
FileLayerMediaType = "application/vnd.content.hauler.file.layer.v1"
FileMediaType = "application/vnd.content.hauler.file.config.v1+json"
// ChartConfigMediaType is the reserved media type for the Helm chart manifest config
ChartConfigMediaType = "application/vnd.cncf.helm.config.v1+json"
// ChartLayerMediaType is the reserved media type for Helm chart package content
ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
// ProvLayerMediaType is the reserved media type for Helm chart provenance files
ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
// Vendor prefixes used to classify media types by origin.
OCIVendorPrefix = "vnd.oci"
DockerVendorPrefix = "vnd.docker"
HaulerVendorPrefix = "vnd.hauler"
// OCIImageIndexFile is the index file name in an OCI image layout.
OCIImageIndexFile = "index.json"
)

View File

@@ -1,180 +0,0 @@
package bootstrap
import (
"bytes"
"context"
"fmt"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/otiai10/copy"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/fs"
"github.com/rancherfederal/hauler/pkg/log"
"helm.sh/helm/v3/pkg/chart/loader"
"io"
"os"
"path/filepath"
)
// Booter drives the lifecycle of bootstrapping a cluster from a hauler package.
// NOTE(review): booter below does not satisfy this interface — its PreBoot
// takes (context.Context, driver.Driver) while the interface declares
// PreBoot(context.Context), and no Init implementation is visible here.
// Confirm which signature is intended.
type Booter interface {
Init() error
PreBoot(context.Context) error
Boot(context.Context, driver.Driver) error
PostBoot(context.Context, driver.Driver) error
}
// booter bootstraps a cluster from an on-disk hauler package.
type booter struct {
Package v1alpha1.Package
fs fs.PkgFs
logger log.Logger
}
//NewBooter will build a new booter given a path to a directory containing a hauler package.json
func NewBooter(pkgPath string, logger log.Logger) (*booter, error) {
pkg, err := v1alpha1.LoadPackageFromDir(pkgPath)
if err != nil {
return nil, err
}
fsys := fs.NewPkgFS(pkgPath)
return &booter{
Package: pkg,
fs: fsys,
logger: logger,
}, nil
}
// PreBoot stages the package's binaries, images, bundles, and charts into the
// driver's data directories and writes the driver config.
func (b booter) PreBoot(ctx context.Context, d driver.Driver) error {
b.logger.Infof("Beginning pre boot")
//TODO: Feel like there's a better way to do all this dir creation
if err := os.MkdirAll(d.DataPath(), os.ModePerm); err != nil {
return err
}
if err := b.moveBin(); err != nil {
return err
}
if err := b.moveImages(d); err != nil {
return err
}
if err := b.moveBundles(d); err != nil {
return err
}
if err := b.moveCharts(d); err != nil {
return err
}
b.logger.Debugf("Writing %s config", d.Name())
if err := d.WriteConfig(); err != nil {
return err
}
b.logger.Successf("Completed pre boot")
return nil
}
// Boot starts the driver and blocks until its core components have provisioned.
func (b booter) Boot(ctx context.Context, d driver.Driver) error {
	b.logger.Infof("Beginning boot")

	// Mirror the driver's output to stdout while retaining it in buffers.
	var stdoutBuf, stderrBuf bytes.Buffer
	out := io.MultiWriter(os.Stdout, &stdoutBuf, &stderrBuf)

	if err := d.Start(out); err != nil {
		return err
	}

	b.logger.Infof("Waiting for driver core components to provision...")
	// BUG FIX: the original returned `err` (guaranteed nil at this point)
	// instead of the wait error, silently swallowing provisioning timeouts.
	if waitErr := waitForDriver(ctx, d); waitErr != nil {
		return waitErr
	}

	b.logger.Successf("Completed boot")
	return nil
}
// PostBoot installs the fleet CRD chart and the fleet chart into the freshly
// booted cluster, using the package's pinned fleet version.
func (b booter) PostBoot(ctx context.Context, d driver.Driver) error {
b.logger.Infof("Beginning post boot")
cf := NewBootConfig("fleet-system", d.KubeConfigPath())
// Charts are staged on disk by PreBoot; paths are derived from the fleet version.
fleetCrdChartPath := b.fs.Chart().Path(fmt.Sprintf("fleet-crd-%s.tgz", b.Package.Spec.Fleet.VLess()))
fleetCrdChart, err := loader.Load(fleetCrdChartPath)
if err != nil {
return err
}
b.logger.Infof("Installing fleet crds")
fleetCrdRelease, fleetCrdErr := installChart(cf, fleetCrdChart, "fleet-crd", nil, b.logger)
if fleetCrdErr != nil {
return fleetCrdErr
}
b.logger.Infof("Installed '%s' to namespace '%s'", fleetCrdRelease.Name, fleetCrdRelease.Namespace)
fleetChartPath := b.fs.Chart().Path(fmt.Sprintf("fleet-%s.tgz", b.Package.Spec.Fleet.VLess()))
fleetChart, err := loader.Load(fleetChartPath)
if err != nil {
return err
}
b.logger.Infof("Installing fleet")
fleetRelease, fleetErr := installChart(cf, fleetChart, "fleet", nil, b.logger)
if fleetErr != nil {
return fleetErr
}
b.logger.Infof("Installed '%s' to namespace '%s'", fleetRelease.Name, fleetRelease.Namespace)
b.logger.Successf("Completed post boot")
return nil
}
//TODO: Move* will actually just copy. This is more expensive, but is much safer/easier at handling deep merges, should this change?

// moveBin copies the package's bin directory into /opt/hauler/bin.
func (b booter) moveBin() error {
	// filepath.Join with a single argument was a no-op; use the literal path.
	path := "/opt/hauler/bin"
	if err := os.MkdirAll(path, os.ModePerm); err != nil {
		return err
	}
	return copy.Copy(b.fs.Bin().Path(), path)
}
// moveImages writes the package's OCI layout as a single tarball into the
// driver's preload images directory.
func (b booter) moveImages(d driver.Driver) error {
//NOTE: archives are not recursively searched, this _must_ be at the images dir
path := d.DataPath("agent/images")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
refs, err := b.fs.MapLayout()
if err != nil {
return err
}
return tarball.MultiRefWriteToFile(filepath.Join(path, "hauler.tar"), refs)
}
// moveBundles copies the package's bundle manifests into the driver's
// auto-applied manifests directory.
func (b booter) moveBundles(d driver.Driver) error {
path := d.DataPath("server/manifests/hauler")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
return copy.Copy(b.fs.Bundle().Path(), path)
}
// moveCharts copies the package's charts into the driver's static charts directory.
func (b booter) moveCharts(d driver.Driver) error {
path := d.DataPath("server/static/charts/hauler")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
return copy.Copy(b.fs.Chart().Path(), path)
}

View File

@@ -1,29 +0,0 @@
package bootstrap
import (
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// BootSettings carries the namespace and kubeconfig used to talk to the
// freshly booted cluster, exposed as generic CLI config flags.
type BootSettings struct {
// config mirrors Namespace/KubeConfig by pointer; it is built in NewBootConfig.
config *genericclioptions.ConfigFlags
Namespace string
KubeConfig string
}
// NewBootConfig builds BootSettings for the given namespace and kubeconfig path.
func NewBootConfig(ns, kubepath string) *BootSettings {
env := &BootSettings{
Namespace: ns,
KubeConfig: kubepath,
}
// ConfigFlags holds pointers into env so later mutations are reflected.
env.config = &genericclioptions.ConfigFlags{
Namespace: &env.Namespace,
KubeConfig: &env.KubeConfig,
}
return env
}
// RESTClientGetter gets the kubeconfig from BootSettings
func (s *BootSettings) RESTClientGetter() genericclioptions.RESTClientGetter {
return s.config
}

View File

@@ -1,20 +0,0 @@
package bootstrap
import (
"testing"
)
// TestBootSettings verifies NewBootConfig stores the namespace and kubeconfig
// it was given.
func TestBootSettings(t *testing.T) {
ns := "test"
kpath := "somepath"
settings := NewBootConfig(ns, kpath)
if settings.Namespace != ns {
t.Errorf("expected namespace %q, got %q", ns, settings.Namespace)
}
if settings.KubeConfig != kpath {
t.Errorf("expected kube-config %q, got %q", kpath, settings.KubeConfig)
}
}

View File

@@ -1,63 +0,0 @@
package bootstrap
import (
"context"
"errors"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/kube"
"github.com/rancherfederal/hauler/pkg/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/release"
"os"
"time"
)
// waitForDriver blocks until the driver's kubeconfig appears on disk (up to
// 2 minutes), then waits for the driver's system objects to become ready.
func waitForDriver(ctx context.Context, d driver.Driver) error {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
//TODO: This is a janky way of waiting for file to exist
for {
_, err := os.Stat(d.KubeConfigPath())
if err == nil {
break
}
if ctx.Err() == context.DeadlineExceeded {
return errors.New("timed out waiting for driver to provision")
}
time.Sleep(1 * time.Second)
}
cfg, err := kube.NewKubeConfig()
if err != nil {
return err
}
// Poll every 5s, give up after 5m.
sc, err := kube.NewStatusChecker(cfg, 5*time.Second, 5*time.Minute)
if err != nil {
return err
}
return sc.WaitForCondition(d.SystemObjects()...)
}
//TODO: This is likely way too fleet specific
// installChart installs the chart as releaseName into cf.Namespace, creating
// the namespace if needed and waiting for resources to become ready.
func installChart(cf *BootSettings, chart *chart.Chart, releaseName string, vals map[string]interface{}, logger log.Logger) (*release.Release, error) {
actionConfig := new(action.Configuration)
if err := actionConfig.Init(cf.RESTClientGetter(), cf.Namespace, os.Getenv("HELM_DRIVER"), logger.Debugf); err != nil {
return nil, err
}
client := action.NewInstall(actionConfig)
client.ReleaseName = releaseName
client.CreateNamespace = true
client.Wait = true
//TODO: Do this better
client.Namespace = cf.Namespace
return client.Run(chart, vals)
}

102
pkg/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,102 @@
package cache
import (
"errors"
"io"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/artifact"
)
// Cache stores and retrieves layers by hash.
type Cache interface {
// Put stores the layer and returns a layer that writes through to the cache.
Put(v1.Layer) (v1.Layer, error)
// Get returns the cached layer for the hash, or ErrLayerNotFound.
Get(v1.Hash) (v1.Layer, error)
}
// ErrLayerNotFound signals a cache miss from Cache.Get.
var ErrLayerNotFound = errors.New("layer not found")
// oci wraps an artifact.OCI so its layers are served through a Cache.
type oci struct {
artifact.OCI
c Cache
}
// Oci returns o with its layers transparently cached in c.
func Oci(o artifact.OCI, c Cache) artifact.OCI {
return &oci{
OCI: o,
c: c,
}
}
// Layers wraps each underlying layer in a lazyLayer that defers to the cache.
func (o *oci) Layers() ([]v1.Layer, error) {
ls, err := o.OCI.Layers()
if err != nil {
return nil, err
}
var out []v1.Layer
for _, l := range ls {
out = append(out, &lazyLayer{inner: l, c: o.c})
}
return out, nil
}
// lazyLayer serves layer content from the cache, populating it on first read.
type lazyLayer struct {
inner v1.Layer
c Cache
}
// Compressed returns the compressed content, keyed in the cache by digest.
func (l *lazyLayer) Compressed() (io.ReadCloser, error) {
digest, err := l.inner.Digest()
if err != nil {
return nil, err
}
layer, err := l.getOrPut(digest)
if err != nil {
return nil, err
}
return layer.Compressed()
}
// Uncompressed returns the raw content, keyed in the cache by diffID.
func (l *lazyLayer) Uncompressed() (io.ReadCloser, error) {
diffID, err := l.inner.DiffID()
if err != nil {
return nil, err
}
layer, err := l.getOrPut(diffID)
if err != nil {
return nil, err
}
return layer.Uncompressed()
}
// getOrPut returns the cached layer for h, populating the cache from the
// inner layer on a miss; any other cache error is returned as-is.
func (l *lazyLayer) getOrPut(h v1.Hash) (v1.Layer, error) {
	var layer v1.Layer
	if cl, err := l.c.Get(h); err == nil {
		layer = cl
	} else if errors.Is(err, ErrLayerNotFound) {
		// errors.Is (rather than ==) tolerates wrapped miss errors from
		// Cache implementations.
		rl, err := l.c.Put(l.inner)
		if err != nil {
			return nil, err
		}
		layer = rl
	} else {
		return nil, err
	}
	return layer, nil
}
// Size, DiffID, Digest, and MediaType delegate to the inner layer.
func (l *lazyLayer) Size() (int64, error) { return l.inner.Size() }

// BUG FIX: DiffID previously delegated to inner.Digest(), returning the
// compressed-content hash where the uncompressed hash was expected.
func (l *lazyLayer) DiffID() (v1.Hash, error) { return l.inner.DiffID() }

func (l *lazyLayer) Digest() (v1.Hash, error) { return l.inner.Digest() }

func (l *lazyLayer) MediaType() (types.MediaType, error) { return l.inner.MediaType() }

5
pkg/cache/doc.go vendored Normal file
View File

@@ -0,0 +1,5 @@
package cache
/*
This package is _heavily_ influenced by go-containerregistry and its cache implementation: https://github.com/google/go-containerregistry/tree/main/pkg/v1/cache
*/

120
pkg/cache/filesystem.go vendored Normal file
View File

@@ -0,0 +1,120 @@
package cache
import (
"io"
"os"
"path/filepath"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/rancherfederal/hauler/pkg/artifact/local"
)
// fs is a filesystem-backed Cache rooted at a directory.
type fs struct {
root string
}
// NewFilesystem returns a Cache storing layers under root/<algorithm>/<hex>.
func NewFilesystem(root string) Cache {
return &fs{root: root}
}
// Put returns a layer whose reads tee content into the cache directory.
func (f *fs) Put(l v1.Layer) (v1.Layer, error) {
digest, err := l.Digest()
if err != nil {
return nil, err
}
diffID, err := l.DiffID()
if err != nil {
return nil, err
}
return &cachedLayer{
Layer: l,
root: f.root,
digest: digest,
diffID: diffID,
}, nil
}
// Get opens the cached layer file for h, mapping a missing file to ErrLayerNotFound.
func (f *fs) Get(h v1.Hash) (v1.Layer, error) {
opener := f.open(h)
l, err := local.LayerFromOpener(opener)
if os.IsNotExist(err) {
return nil, ErrLayerNotFound
}
return l, err
}
// open returns an Opener over the on-disk file for h.
func (f *fs) open(h v1.Hash) local.Opener {
return func() (io.ReadCloser, error) {
return os.Open(layerpath(f.root, h))
}
}
// cachedLayer wraps a layer so that reading it writes the bytes into the cache.
type cachedLayer struct {
v1.Layer
root string
digest, diffID v1.Hash
}
// create opens (creating parent dirs as needed) the cache file for h.
func (l *cachedLayer) create(h v1.Hash) (io.WriteCloser, error) {
lp := layerpath(l.root, h)
if err := os.MkdirAll(filepath.Dir(lp), os.ModePerm); err != nil {
return nil, err
}
return os.Create(lp)
}
// Compressed returns the layer's compressed content while teeing the bytes
// into the cache file keyed by digest.
func (l *cachedLayer) Compressed() (io.ReadCloser, error) {
	f, err := l.create(l.digest)
	if err != nil {
		// BUG FIX: the original returned (nil, nil) here, handing callers a
		// nil ReadCloser with no error to signal the failure.
		return nil, err
	}
	rc, err := l.Layer.Compressed()
	if err != nil {
		return nil, err
	}
	return &readcloser{
		t:      io.TeeReader(rc, f),
		closes: []func() error{rc.Close, f.Close},
	}, nil
}
// Uncompressed returns the layer's raw content while teeing the bytes into
// the cache file keyed by diffID.
func (l *cachedLayer) Uncompressed() (io.ReadCloser, error) {
f, err := l.create(l.diffID)
if err != nil {
return nil, err
}
rc, err := l.Layer.Uncompressed()
if err != nil {
return nil, err
}
return &readcloser{
t: io.TeeReader(rc, f),
closes: []func() error{rc.Close, f.Close},
}, nil
}
// layerpath maps a hash to its cache file: root/<algorithm>/<hex>.
func layerpath(root string, h v1.Hash) string {
return filepath.Join(root, h.Algorithm, h.Hex)
}
// readcloser reads from a tee reader and closes all wrapped closers,
// returning the first close error encountered.
type readcloser struct {
t io.Reader
closes []func() error
}
func (rc *readcloser) Read(b []byte) (int, error) {
return rc.t.Read(b)
}
// Close invokes every registered closer; the first error wins but all run.
func (rc *readcloser) Close() error {
var err error
for _, c := range rc.closes {
lastErr := c()
if err == nil {
err = lastErr
}
}
return err
}

View File

@@ -0,0 +1,78 @@
package chart
import (
gname "github.com/google/go-containerregistry/pkg/name"
"github.com/rancherfederal/hauler/pkg/artifact"
"github.com/rancherfederal/hauler/pkg/content/chart"
"github.com/rancherfederal/hauler/pkg/content/image"
)
var _ artifact.Collection = (*tchart)(nil)
// tchart is a thick chart that includes all the dependent images as well as the chart itself
type tchart struct {
chart *chart.Chart
// computed guards against re-running image discovery.
computed bool
contents map[gname.Reference]artifact.OCI
}
// NewChart builds a thick-chart collection for the named chart at repo/version.
func NewChart(name, repo, version string) (artifact.Collection, error) {
o, err := chart.NewChart(name, repo, version)
if err != nil {
return nil, err
}
return &tchart{
chart: o,
contents: make(map[gname.Reference]artifact.OCI),
}, nil
}
// Contents returns the chart's dependent images, computing them on first call.
// NOTE(review): the chart itself does not appear to be added to contents here —
// confirm whether that is intended.
func (c *tchart) Contents() (map[gname.Reference]artifact.OCI, error) {
if err := c.compute(); err != nil {
return nil, err
}
return c.contents, nil
}
// compute runs image discovery exactly once.
func (c *tchart) compute() error {
if c.computed {
return nil
}
if err := c.dependentImages(); err != nil {
return err
}
c.computed = true
return nil
}
// dependentImages renders the chart and registers every referenced image.
func (c *tchart) dependentImages() error {
ch, err := c.chart.Load()
if err != nil {
return err
}
imgs, err := ImagesInChart(ch)
if err != nil {
return err
}
for _, img := range imgs.Spec.Images {
ref, err := gname.ParseReference(img.Ref)
if err != nil {
return err
}
i, err := image.NewImage(img.Ref)
if err != nil {
return err
}
c.contents[ref] = i
}
return nil
}

View File

@@ -0,0 +1,121 @@
package chart
import (
"bytes"
"encoding/json"
"io"
"strings"
"github.com/rancher/wrangler/pkg/yaml"
"helm.sh/helm/v3/pkg/action"
helmchart "helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/kube/fake"
"helm.sh/helm/v3/pkg/storage"
"helm.sh/helm/v3/pkg/storage/driver"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/jsonpath"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)
// defaultKnownImagePaths are the jsonpath expressions searched for image
// references in rendered manifests.
var defaultKnownImagePaths = []string{
// Deployments & DaemonSets
"{.spec.template.spec.initContainers[*].image}",
"{.spec.template.spec.containers[*].image}",
// Pods
"{.spec.initContainers[*].image}",
"{.spec.containers[*].image}",
}
// ImagesInChart will render a chart and identify all dependent images from it
func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
objs, err := template(c)
if err != nil {
return v1alpha1.Images{}, err
}
var imageRefs []string
for _, o := range objs {
d, err := o.(*unstructured.Unstructured).MarshalJSON()
if err != nil {
// TODO: Should we actually capture these errors?
continue
}
var obj interface{}
if err := json.Unmarshal(d, &obj); err != nil {
continue
}
// Best-effort extraction: unmatched paths are skipped, not errors.
j := jsonpath.New("")
j.AllowMissingKeys(true)
for _, p := range defaultKnownImagePaths {
r, err := parseJSONPath(obj, j, p)
if err != nil {
continue
}
imageRefs = append(imageRefs, r...)
}
}
ims := v1alpha1.Images{
Spec: v1alpha1.ImageSpec{
Images: []v1alpha1.Image{},
},
}
for _, ref := range imageRefs {
ims.Spec.Images = append(ims.Spec.Images, v1alpha1.Image{Ref: ref})
}
return ims, nil
}
// template performs a client-only dry-run install to render the chart's
// manifests without touching a cluster, returning the parsed objects.
func template(c *helmchart.Chart) ([]runtime.Object, error) {
s := storage.Init(driver.NewMemory())
templateCfg := &action.Configuration{
RESTClientGetter: nil,
Releases: s,
KubeClient: &fake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities,
Log: func(format string, v ...interface{}) {},
}
// TODO: Do we need values if we're claiming this is best effort image detection?
// Justification being: if users are relying on us to get images from their values, they could just add images to the []ImagesInChart spec of the Store api
vals := make(map[string]interface{})
client := action.NewInstall(templateCfg)
client.ReleaseName = "dry"
client.DryRun = true
client.Replace = true
client.ClientOnly = true
client.IncludeCRDs = true
release, err := client.Run(c, vals)
if err != nil {
return nil, err
}
return yaml.ToObjects(bytes.NewBufferString(release.Manifest))
}
// parseJSONPath evaluates the jsonpath template against data and returns the
// space-separated results as individual strings.
func parseJSONPath(data interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
	if err := parser.Parse(template); err != nil {
		return nil, err
	}

	var out bytes.Buffer
	if err := parser.Execute(&out, data); err != nil {
		return nil, err
	}

	isSpace := func(r rune) bool { return r == ' ' }
	return strings.FieldsFunc(out.String(), isSpace), nil
}

191
pkg/collection/k3s/k3s.go Normal file
View File

@@ -0,0 +1,191 @@
package k3s
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"github.com/google/go-containerregistry/pkg/name"
"github.com/rancherfederal/hauler/pkg/artifact"
"github.com/rancherfederal/hauler/pkg/content/file"
"github.com/rancherfederal/hauler/pkg/content/image"
)
var _ artifact.Collection = (*k3s)(nil)
// Upstream endpoints for k3s release assets, channel metadata, and the install script.
const (
releaseUrl = "https://github.com/k3s-io/k3s/releases/download"
channelUrl = "https://update.k3s.io/v1-release/channels"
bootstrapUrl = "https://get.k3s.io"
)
// Sentinel errors for the individual asset-resolution steps.
var (
ErrImagesNotFound = errors.New("k3s dependent images not found")
ErrFetchingImages = errors.New("failed to fetch k3s dependent images")
ErrExecutableNotfound = errors.New("k3s executable not found")
ErrChannelNotFound = errors.New("desired k3s channel not found")
)
// k3s is a collection of everything needed to run k3s: the binary, the
// install script, and the release's dependent images.
type k3s struct {
version string
arch string
// computed guards against re-resolving assets.
computed bool
contents map[name.Reference]artifact.OCI
// channels maps channel name (e.g. "stable") to its latest version.
channels map[string]string
}
// NewK3s builds a k3s collection for a version or channel name.
// NOTE(review): arch is never set here — confirm how non-amd64 is selected.
func NewK3s(version string) (artifact.Collection, error) {
return &k3s{
version: version,
contents: make(map[name.Reference]artifact.OCI),
}, nil
}
// Contents resolves (once) and returns all k3s artifacts.
func (k *k3s) Contents() (map[name.Reference]artifact.OCI, error) {
if err := k.compute(); err != nil {
return nil, err
}
return k.contents, nil
}
// compute resolves the channel (best effort), then images, binary, and
// bootstrap script, exactly once.
func (k *k3s) compute() error {
if k.computed {
return nil
}
// Channel lookup is best effort: if it fails, version is used as given.
if err := k.fetchChannels(); err == nil {
if version, ok := k.channels[k.version]; ok {
k.version = version
}
}
if err := k.images(); err != nil {
return err
}
if err := k.executable(); err != nil {
return err
}
if err := k.bootstrap(); err != nil {
return err
}
k.computed = true
return nil
}
// executable verifies the k3s binary release asset exists and registers it as
// file content tagged with a DNS-compliant version.
func (k *k3s) executable() error {
	n := "k3s"
	if k.arch != "" && k.arch != "amd64" {
		// BUG FIX: release assets are named "k3s-<arch>"; the original built
		// "name-<arch>".
		n = fmt.Sprintf("k3s-%s", k.arch)
	}

	fref := k.releaseUrl(n)
	// BUG FIX: check the transport error before dereferencing resp — on
	// network failure http.Head returns a nil response and the original
	// order of checks panicked.
	resp, err := http.Head(fref)
	if err != nil || resp.StatusCode != http.StatusOK {
		return ErrExecutableNotfound
	}

	f, err := file.NewFile(fref, "k3s")
	if err != nil {
		return err
	}

	ref, err := name.ParseReference("hauler/k3s", name.WithDefaultTag(k.dnsCompliantVersion()), name.WithDefaultRegistry(""))
	if err != nil {
		return err
	}

	k.contents[ref] = f
	return nil
}
// bootstrap registers the https://get.k3s.io install script as file content.
func (k *k3s) bootstrap() error {
	installScript, err := file.NewFile(bootstrapUrl, "get-k3s.io")
	if err != nil {
		return err
	}

	scriptRef, refErr := name.ParseReference("hauler/get-k3s.io", name.WithDefaultRegistry(""), name.WithDefaultTag("latest"))
	if refErr != nil {
		return refErr
	}

	k.contents[scriptRef] = installScript
	return nil
}
// images downloads the release's k3s-images.txt and registers each listed
// image reference as image content.
func (k *k3s) images() error {
	// BUG FIX: check the transport error before touching resp — the original
	// read resp.StatusCode first and would dereference a nil response.
	resp, err := http.Get(k.releaseUrl("k3s-images.txt"))
	if err != nil {
		return ErrImagesNotFound
	}
	// BUG FIX: the body was never closed on any path.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ErrFetchingImages
	}

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		reference := scanner.Text()
		ref, err := name.ParseReference(reference)
		if err != nil {
			return err
		}
		o, err := image.NewImage(reference)
		if err != nil {
			return err
		}
		k.contents[ref] = o
	}
	// Surface stream read errors the original silently dropped.
	return scanner.Err()
}
// releaseUrl builds the download URL for a named asset of this k3s version.
func (k *k3s) releaseUrl(artifact string) string {
	base, _ := url.Parse(releaseUrl)
	base.Path = path.Join(base.Path, k.version, artifact)
	return base.String()
}
// dnsCompliantVersion makes the version usable as an image tag by replacing
// "+" (e.g. "v1.22.2+k3s2" -> "v1.22.2-k3s2").
func (k *k3s) dnsCompliantVersion() string {
return strings.ReplaceAll(k.version, "+", "-")
}
// fetchChannels queries the k3s channel server and caches the
// channel-name -> latest-version mapping on k.
func (k *k3s) fetchChannels() error {
	resp, err := http.Get(channelUrl)
	if err != nil {
		return err
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()

	var c channel
	if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
		return err
	}

	channels := make(map[string]string, len(c.Data))
	for _, ch := range c.Data {
		channels[ch.Name] = ch.Latest
	}
	k.channels = channels
	return nil
}
// channel models the JSON response of the k3s channel server.
type channel struct {
Data []channelData `json:"data"`
}
// channelData is one channel entry: its id, name, and latest release version.
type channelData struct {
ID string `json:"id"`
Name string `json:"name"`
Latest string `json:"latest"`
}

View File

@@ -0,0 +1,71 @@
package k3s
import (
"context"
"os"
"testing"
"github.com/rancherfederal/hauler/pkg/artifact"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// TODO: This is not at all a good test, we really just need to test the added collections functionality (like image scanning)
// TestNewK3s resolves a real k3s release and adds its contents to a temp
// store. NOTE(review): this hits the network and GitHub releases — it is an
// integration test, not a unit test.
func TestNewK3s(t *testing.T) {
ctx := context.Background()
l := log.NewLogger(os.Stdout)
ctx = l.WithContext(ctx)
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
t.Error(err)
}
// NOTE(review): os.Remove fails on non-empty dirs; os.RemoveAll is likely intended.
defer os.Remove(tmpdir)
s := store.NewStore(ctx, tmpdir)
s.Open()
defer s.Close()
type args struct {
version string
}
tests := []struct {
name string
args args
want artifact.Collection
wantErr bool
}{
{
name: "should work",
args: args{
version: "v1.22.2+k3s2",
},
want: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewK3s(tt.args.version)
if (err != nil) != tt.wantErr {
t.Errorf("NewK3s() error = %v, wantErr %v", err, tt.wantErr)
return
}
c, err := got.Contents()
if err != nil {
t.Fatal(err)
}
for r, o := range c {
if _, err := s.AddArtifact(ctx, o, r); err != nil {
t.Fatal(err)
}
}
// if !reflect.DeepEqual(got, tt.want) {
// t.Errorf("NewK3s() got = %v, want %v", got, tt.want)
// }
})
}
}

141
pkg/content/chart/chart.go Normal file
View File

@@ -0,0 +1,141 @@
package chart
import (
"bytes"
"encoding/json"
"io"
"os"
"path/filepath"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"github.com/rancherfederal/hauler/pkg/artifact"
"github.com/rancherfederal/hauler/pkg/artifact/local"
"github.com/rancherfederal/hauler/pkg/artifact/types"
)
// Compile-time check that Chart satisfies the artifact.OCI interface.
var _ artifact.OCI = (*Chart)(nil)

// Chart wraps a locally resolved Helm chart archive and exposes it as an
// artifact.OCI so it can be stored and pushed as an OCI artifact.
type Chart struct {
	// path is the filesystem path to the downloaded chart archive.
	path string
	// annotations are attached to the rendered manifest; never set by
	// NewChart in this file — TODO(review): confirm intended setter.
	annotations map[string]string
}
// NewChart resolves a chart by name, repository URL, and version to a local
// archive (via helm's locator) and returns it wrapped as a Chart.
func NewChart(name, repo, version string) (*Chart, error) {
	locator := action.ChartPathOptions{
		RepoURL: repo,
		Version: version,
	}

	archivePath, err := locator.LocateChart(name, cli.New())
	if err != nil {
		return nil, err
	}
	return &Chart{path: archivePath}, nil
}
// MediaType returns the manifest media type used for chart artifacts
// (an OCI image manifest).
func (h *Chart) MediaType() string {
	return types.OCIManifestSchema1
}
// Manifest renders the OCI manifest for the chart: a chart-metadata config
// descriptor plus one descriptor per content layer.
func (h *Chart) Manifest() (*gv1.Manifest, error) {
	cfgDesc, err := h.configDescriptor()
	if err != nil {
		return nil, err
	}

	ls, err := h.Layers()
	if err != nil {
		// The original dropped this error and iterated a nil slice,
		// silently producing a manifest with no layers; surface it instead.
		return nil, err
	}

	var layerDescs []gv1.Descriptor
	for _, l := range ls {
		desc, err := partial.Descriptor(l)
		if err != nil {
			return nil, err
		}
		layerDescs = append(layerDescs, *desc)
	}

	return &gv1.Manifest{
		SchemaVersion: 2,
		MediaType:     gtypes.MediaType(h.MediaType()),
		Config:        cfgDesc,
		Layers:        layerDescs,
		Annotations:   h.annotations,
	}, nil
}
// RawConfig returns the chart's Chart.yaml metadata serialized as JSON;
// this becomes the OCI config blob.
func (h *Chart) RawConfig() ([]byte, error) {
	loaded, err := loader.Load(h.path)
	if err != nil {
		return nil, err
	}
	return json.Marshal(loaded.Metadata)
}
// configDescriptor builds the descriptor (media type, digest, size) for the
// chart-metadata config blob produced by RawConfig.
func (h *Chart) configDescriptor() (gv1.Descriptor, error) {
	var zero gv1.Descriptor

	data, err := h.RawConfig()
	if err != nil {
		return zero, err
	}

	digest, n, err := gv1.SHA256(bytes.NewBuffer(data))
	if err != nil {
		return zero, err
	}

	return gv1.Descriptor{
		MediaType: types.ChartConfigMediaType,
		Size:      n,
		Digest:    digest,
	}, nil
}
// Load parses the chart archive from disk into a helm chart.Chart.
func (h *Chart) Load() (*chart.Chart, error) {
	open := chartOpener(h.path)
	rc, err := open()
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	return loader.LoadArchive(rc)
}
// Layers returns the chart's content layers; currently just the chart
// archive itself.
func (h *Chart) Layers() ([]gv1.Layer, error) {
	chartDataLayer, err := h.chartDataLayer()
	if err != nil {
		return nil, err
	}
	return []gv1.Layer{
		chartDataLayer,
		// TODO: Add provenance
	}, nil
}

// RawChartData reads the raw chart archive bytes from disk.
func (h *Chart) RawChartData() ([]byte, error) {
	return os.ReadFile(h.path)
}
// chartDataLayer wraps the chart archive as an OCI layer, annotated with its
// base filename as the title.
func (h *Chart) chartDataLayer() (gv1.Layer, error) {
	return local.LayerFromOpener(chartOpener(h.path),
		local.WithMediaType(types.ChartLayerMediaType),
		local.WithAnnotations(map[string]string{
			ocispec.AnnotationTitle: filepath.Base(h.path),
		}))
}

// chartOpener returns an Opener that opens the chart archive at path.
func chartOpener(path string) local.Opener {
	return func() (io.ReadCloser, error) { return os.Open(path) }
}

View File

@@ -0,0 +1,72 @@
package chart_test
import (
"context"
"os"
"path"
"testing"
"github.com/google/go-containerregistry/pkg/name"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/content/chart"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
func TestChart_Copy(t *testing.T) {
ctx := context.Background()
l := log.NewLogger(os.Stdout)
ctx = l.WithContext(ctx)
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
t.Error(err)
}
defer os.Remove(tmpdir)
s := store.NewStore(ctx, tmpdir)
s.Open()
defer s.Close()
type args struct {
ctx context.Context
registry string
}
tests := []struct {
name string
cfg v1alpha1.Chart
args args
wantErr bool
}{
// TODO: This test isn't self-contained
{
name: "should work with unversioned chart",
cfg: v1alpha1.Chart{
Name: "loki",
RepoURL: "https://grafana.github.io/helm-charts",
},
args: args{
ctx: ctx,
registry: s.Registry(),
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c, err := chart.NewChart(tt.cfg.Name, tt.cfg.RepoURL, tt.cfg.Version)
if err != nil {
t.Fatal(err)
}
ref, err := name.ParseReference(path.Join("hauler", tt.cfg.Name))
if err != nil {
t.Fatal(err)
}
if _, err := s.AddArtifact(ctx, c, ref); (err != nil) != tt.wantErr {
t.Error(err)
}
})
}
}

24
pkg/content/content.go Normal file
View File

@@ -0,0 +1,24 @@
package content
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)
// Load decodes the TypeMeta of a content/collection document and verifies it
// belongs to one of the recognized hauler API groups. It returns the decoded
// kind, or an error for unparseable or unrecognized input.
func Load(data []byte) (schema.ObjectKind, error) {
	// Decode into a value rather than a *TypeMeta: empty or "null" input
	// would leave the pointer nil and panic on the method calls below.
	var tm metav1.TypeMeta
	if err := yaml.Unmarshal(data, &tm); err != nil {
		return nil, err
	}

	gv := tm.GroupVersionKind().GroupVersion()
	if gv != v1alpha1.ContentGroupVersion && gv != v1alpha1.CollectionGroupVersion {
		return nil, fmt.Errorf("unrecognized content/collection type: %s", tm.GroupVersionKind().String())
	}
	return &tm, nil
}

View File

@@ -0,0 +1,82 @@
package file
import (
"bytes"
"encoding/json"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/artifact/types"
)
// Compile-time check that config satisfies partial.Describable.
var _ partial.Describable = (*config)(nil)

// config is the OCI config blob for a file artifact; its JSON serialization
// is what gets hashed and sized into the manifest's config descriptor.
type config struct {
	Reference   string            `json:"ref"`  // Reference is the reference from where the file was sourced
	Name        string            `json:"name"` // Name is the files name on disk
	Annotations map[string]string `json:"annotations,omitempty"`
	URLs        []string          `json:"urls,omitempty"`

	// Cache of the serialized config's digest/size, populated by compute.
	computed bool
	size     int64
	hash     gv1.Hash
}
// Descriptor returns the OCI descriptor (media type, digest, size, URLs,
// annotations) for the serialized config.
//
// NOTE(review): the value receiver means compute writes its cache to a copy,
// so the memoization never persists across calls — confirm whether a pointer
// receiver was intended.
func (c config) Descriptor() (gv1.Descriptor, error) {
	if err := c.compute(); err != nil {
		return gv1.Descriptor{}, err
	}
	return gv1.Descriptor{
		MediaType:   types.FileMediaType,
		Size:        c.size,
		Digest:      c.hash,
		URLs:        c.URLs,
		Annotations: c.Annotations,
		// Platform: nil,
	}, nil
}
// MediaType reports the config blob's media type.
func (c config) MediaType() (gtypes.MediaType, error) {
	return types.FileMediaType, nil
}

// Digest returns the SHA-256 of the serialized config.
func (c config) Digest() (gv1.Hash, error) {
	err := c.compute()
	if err != nil {
		return gv1.Hash{}, err
	}
	return c.hash, nil
}

// Size returns the byte length of the serialized config.
func (c config) Size() (int64, error) {
	err := c.compute()
	if err != nil {
		return 0, err
	}
	return c.size, nil
}
// Raw serializes the config to JSON; this is the exact byte content of the
// OCI config blob.
func (c *config) Raw() ([]byte, error) {
	return json.Marshal(c)
}
// compute lazily hashes and sizes the serialized config, memoizing the
// result on the receiver.
func (c *config) compute() error {
	if c.computed {
		return nil
	}
	data, err := c.Raw()
	if err != nil {
		return err
	}
	h, size, err := gv1.SHA256(bytes.NewBuffer(data))
	if err != nil {
		return err
	}
	c.size = size
	c.hash = h
	// The original never set this flag, so every call re-serialized and
	// re-hashed; mark the cache valid.
	c.computed = true
	return nil
}

107
pkg/content/file/file.go Normal file
View File

@@ -0,0 +1,107 @@
package file
import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"

	gv1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	gtypes "github.com/google/go-containerregistry/pkg/v1/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"

	"github.com/rancherfederal/hauler/pkg/artifact"
	"github.com/rancherfederal/hauler/pkg/artifact/local"
	"github.com/rancherfederal/hauler/pkg/artifact/types"
)
// Compile-time check that file satisfies the artifact.OCI interface.
var _ artifact.OCI = (*file)(nil)

// file is a local or remote file represented as a single-layer OCI artifact.
type file struct {
	// blob is the file's contents as an OCI layer.
	blob gv1.Layer
	// config is the OCI config blob describing the file.
	config config
	// blobMap is never read or written in this file — TODO(review): confirm
	// whether it is dead.
	blobMap map[gv1.Hash]gv1.Layer
	// annotations are attached to the rendered manifest; not set by NewFile.
	annotations map[string]string
}
// NewFile builds a file artifact from ref, which is either a local path or an
// http(s) URL; filename is the name recorded for restoring the file to disk.
func NewFile(ref string, filename string) (*file, error) {
	var getter local.Opener
	// Match explicit URL schemes only. The original tested the bare prefixes
	// "http"/"https" (the latter unreachable, being subsumed by the former),
	// which misclassified local files such as "http-notes.txt" as remote.
	if strings.HasPrefix(ref, "http://") || strings.HasPrefix(ref, "https://") {
		getter = remoteOpener(ref)
	} else {
		getter = localOpener(ref)
	}

	annotations := make(map[string]string)
	annotations[ocispec.AnnotationTitle] = filename // For oras FileStore to recognize
	annotations[ocispec.AnnotationSource] = ref

	blob, err := local.LayerFromOpener(getter,
		local.WithMediaType(types.FileLayerMediaType),
		local.WithAnnotations(annotations))
	if err != nil {
		return nil, err
	}

	f := &file{
		blob: blob,
		config: config{
			Reference: ref,
			Name:      filename,
		},
	}
	return f, nil
}
// MediaType returns the manifest media type for file artifacts.
func (f *file) MediaType() string {
	return types.OCIManifestSchema1
}

// RawConfig serializes the file's config blob.
func (f *file) RawConfig() ([]byte, error) {
	return f.config.Raw()
}

// Layers returns the artifact's single content layer.
func (f *file) Layers() ([]gv1.Layer, error) {
	return []gv1.Layer{f.blob}, nil
}
// Manifest renders the OCI manifest: the config descriptor plus the single
// file blob layer.
func (f *file) Manifest() (*gv1.Manifest, error) {
	blobDesc, err := partial.Descriptor(f.blob)
	if err != nil {
		return nil, err
	}

	cfgDesc, err := f.config.Descriptor()
	if err != nil {
		return nil, err
	}

	m := &gv1.Manifest{
		SchemaVersion: 2,
		MediaType:     gtypes.MediaType(f.MediaType()),
		Config:        cfgDesc,
		Layers:        []gv1.Descriptor{*blobDesc},
		Annotations:   f.annotations,
	}
	return m, nil
}
// localOpener returns an Opener that reads the file at path from disk.
func localOpener(path string) local.Opener {
	return func() (io.ReadCloser, error) {
		return os.Open(path)
	}
}
// remoteOpener returns an Opener that fetches url over HTTP(S). The caller
// is responsible for closing the returned body.
func remoteOpener(url string) local.Opener {
	return func() (io.ReadCloser, error) {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		// Without this check a non-2xx response (e.g. a 404 page) would be
		// silently stored as the file's content.
		if resp.StatusCode < 200 || resp.StatusCode > 299 {
			resp.Body.Close()
			return nil, fmt.Errorf("fetching %s: unexpected status %s", url, resp.Status)
		}
		return resp.Body, nil
	}
}

View File

@@ -0,0 +1,188 @@
package file_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/artifact/types"
"github.com/rancherfederal/hauler/pkg/content/file"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// TestFile_Copy exercises adding local and remote files to a store under a
// variety of naming configurations.
func TestFile_Copy(t *testing.T) {
	ctx := context.Background()

	l := log.NewLogger(os.Stdout)
	ctx = l.WithContext(ctx)

	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		// Fatal: nothing below can run without the scratch dir.
		t.Fatal(err)
	}
	// RemoveAll: tmpdir will be non-empty (temp file + store contents), so
	// plain os.Remove would fail and leak it.
	defer os.RemoveAll(tmpdir)

	// Make a temp file
	f, err := os.CreateTemp(tmpdir, "tmp")
	if err != nil {
		// Original ignored this error and would nil-deref f below.
		t.Fatal(err)
	}
	if _, err := f.Write([]byte("content")); err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	fs := newTestFileServer(tmpdir)
	fs.Start()
	defer fs.Stop()

	s := store.NewStore(ctx, tmpdir)
	s.Open()
	defer s.Close()

	type args struct {
		ctx      context.Context
		registry string
	}
	tests := []struct {
		name    string
		cfg     v1alpha1.File
		args    args
		wantErr bool
	}{
		{
			name: "should copy a local file successfully without an explicit name",
			cfg: v1alpha1.File{
				Ref:  f.Name(),
				Name: filepath.Base(f.Name()),
			},
			args: args{
				ctx: ctx,
			},
		},
		{
			name: "should copy a local file successfully with an explicit name",
			cfg: v1alpha1.File{
				Ref:  f.Name(),
				Name: "my-other-file",
			},
			args: args{
				ctx: ctx,
			},
		},
		{
			name: "should fail to copy a local file successfully with a malformed explicit name",
			cfg: v1alpha1.File{
				Ref:  f.Name(),
				Name: "my!invalid~@file",
			},
			args: args{
				ctx: ctx,
			},
			wantErr: true,
		},
		{
			name: "should copy a remote file successfully without an explicit name",
			cfg: v1alpha1.File{
				Ref: fmt.Sprintf("%s/%s", fs.server.URL, filepath.Base(f.Name())),
			},
			args: args{
				ctx: ctx,
			},
		},
		{
			name: "should copy a remote file successfully with an explicit name",
			cfg: v1alpha1.File{
				Ref:  fmt.Sprintf("%s/%s", fs.server.URL, filepath.Base(f.Name())),
				Name: "my-other-file",
			},
			args: args{
				ctx: ctx,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f, err := file.NewFile(tt.cfg.Ref, tt.cfg.Name)
			if err != nil {
				t.Fatal(err)
			}

			ref, err := name.ParseReference("myfile")
			if err != nil {
				t.Fatal(err)
			}

			_, err = s.AddArtifact(ctx, f, ref)
			if (err != nil) != tt.wantErr {
				t.Error(err)
			}
		})
	}
}
// testFileServer serves a directory over HTTP so tests can exercise remote
// file refs.
type testFileServer struct {
	server *httptest.Server
}

// newTestFileServer builds (but does not start) a file server rooted at path.
func newTestFileServer(path string) *testFileServer {
	s := httptest.NewUnstartedServer(http.FileServer(http.Dir(path)))
	return &testFileServer{server: s}
}

// Start begins serving and returns the underlying server; its URL is only
// valid after Start.
func (s *testFileServer) Start() *httptest.Server {
	s.server.Start()
	return s.server
}

// Stop shuts the server down.
func (s *testFileServer) Stop() {
	s.server.Close()
}
// validate ensures got's layer descriptors match what is expected for the
// file at ref: a single file layer whose size/digest match the file bytes and
// whose annotations carry name and source. Only Layers are compared; the
// config descriptor in want is a placeholder. Currently unused — its only
// call site above is commented out.
func validate(ref string, name string, got *v1.Manifest) error {
	data, err := os.ReadFile(ref)
	if err != nil {
		return err
	}
	d := digest.FromBytes(data)

	annotations := make(map[string]string)
	annotations[ocispec.AnnotationTitle] = name
	annotations[ocispec.AnnotationSource] = ref

	want := &v1.Manifest{
		SchemaVersion: 2,
		MediaType:     types.OCIManifestSchema1,
		Config:        v1.Descriptor{},
		Layers: []v1.Descriptor{
			{
				MediaType: types.FileLayerMediaType,
				Size:      int64(len(data)),
				Digest: v1.Hash{
					Algorithm: d.Algorithm().String(),
					Hex:       d.Hex(),
				},
				Annotations: annotations,
			},
		},
		Annotations: nil,
	}

	if !reflect.DeepEqual(want.Layers, got.Layers) {
		return fmt.Errorf("want = (%v) | got = (%v)", want, got)
	}
	return nil
}

View File

@@ -0,0 +1,43 @@
package image
import (
"github.com/google/go-containerregistry/pkg/name"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancherfederal/hauler/pkg/artifact"
)
// Compile-time check that image satisfies the artifact.OCI interface.
var _ artifact.OCI = (*image)(nil)

// MediaType returns the image's manifest media type, or "" if it cannot be
// determined (the artifact.OCI interface leaves no way to surface the error).
func (i *image) MediaType() string {
	mt, err := i.Image.MediaType()
	if err != nil {
		return ""
	}
	return string(mt)
}

// RawConfig returns the image's raw config file bytes.
func (i *image) RawConfig() ([]byte, error) {
	return i.RawConfigFile()
}

// image adapts a go-containerregistry v1.Image to artifact.OCI.
type image struct {
	gv1.Image
}
// NewImage resolves ref (tag or digest form) against its remote registry and
// wraps the result as an image artifact.
func NewImage(ref string) (*image, error) {
	parsed, err := name.ParseReference(ref)
	if err != nil {
		return nil, err
	}

	remoteImg, err := remote.Image(parsed)
	if err != nil {
		return nil, err
	}
	return &image{Image: remoteImg}, nil
}

View File

@@ -0,0 +1,99 @@
package image_test
import (
"context"
"os"
"path"
"path/filepath"
"testing"
"github.com/google/go-containerregistry/pkg/name"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/content/image"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// TestImage_Copy exercises adding remote images (tagged and digest refs) to
// a store.
func TestImage_Copy(t *testing.T) {
	ctx := context.Background()

	l := log.NewLogger(os.Stdout)
	ctx = l.WithContext(ctx)

	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		// Fatal: nothing below can run without the scratch dir.
		t.Fatal(err)
	}
	// RemoveAll: the store writes into tmpdir, so os.Remove would fail on
	// the non-empty directory and leak it.
	defer os.RemoveAll(tmpdir)

	s := store.NewStore(ctx, tmpdir)
	s.Open()
	defer s.Close()

	type args struct {
		ctx      context.Context
		registry string
	}
	tests := []struct {
		name    string
		cfg     v1alpha1.Image
		args    args
		wantErr bool
	}{
		// TODO: These mostly test functionality we're not responsible for (go-containerregistry), refactor these to only stuff we care about
		{
			name: "should work with tagged image",
			cfg: v1alpha1.Image{
				Ref: "busybox:1.34.1",
			},
			args: args{
				ctx: ctx,
			},
			wantErr: false,
		},
		{
			name: "should work with digest image",
			cfg: v1alpha1.Image{
				Ref: "busybox@sha256:6066ca124f8c2686b7ae71aa1d6583b28c6dc3df3bdc386f2c89b92162c597d9",
			},
			args: args{
				ctx: ctx,
			},
			wantErr: false,
		},
		{
			// Renamed: this subtest duplicated the first case's name.
			name: "should work with image whose tag is numeric",
			cfg: v1alpha1.Image{
				Ref: "registry:2",
			},
			args: args{
				ctx: ctx,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			i, err := image.NewImage(tt.cfg.Ref)
			if err != nil {
				// Fatal: continuing with a nil image would panic below.
				t.Fatal(err)
			}

			ref, err := name.ParseReference(path.Join("hauler", filepath.Base(tt.cfg.Ref)))
			if err != nil {
				t.Fatal(err)
			}

			if _, err := s.AddArtifact(ctx, i, ref); (err != nil) != tt.wantErr {
				t.Error(err)
			}
		})
	}
}

View File

@@ -1,49 +0,0 @@
package driver
import (
"context"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"io"
"sigs.k8s.io/cli-utils/pkg/object"
)
// Driver abstracts a kubernetes distribution (k3s, rke2, ...) that hauler
// can install and interact with.
type Driver interface {
	// Name returns the distribution's name.
	Name() string
	//TODO: Really want this to just return a usable client
	KubeConfigPath() string
	// Images lists the distribution's required images keyed by reference —
	// presumably for preloading; confirm against implementations.
	Images(ctx context.Context) (map[name.Reference]v1.Image, error)
	// Binary streams the distribution's executable.
	Binary() (io.ReadCloser, error)
	// SystemObjects identifies cluster objects the distribution deploys.
	SystemObjects() []object.ObjMetadata
	// Start launches the distribution, writing output to the writer.
	Start(io.Writer) error
	// DataPath joins elements onto the distribution's data directory.
	DataPath(...string) string
	// WriteConfig persists the distribution's configuration to disk.
	WriteConfig() error
}
//NewDriver will return a new concrete Driver type given a kind
func NewDriver(driver v1alpha1.Driver) (d Driver) {
	switch driver.Type {
	case "rke2":
		// TODO
		// NOTE(review): this case is empty and Go does not fall through, so
		// "rke2" currently returns the zero (nil) Driver — confirm intent.
	default:
		// Any other type (including empty) defaults to k3s with its
		// conventional paths.
		d = K3s{
			Version: driver.Version,
			Config: K3sConfig{
				DataDir:        "/var/lib/rancher/k3s",
				KubeConfig:     "/etc/rancher/k3s/k3s.yaml",
				KubeConfigMode: "0644",
				Disable:        nil,
			},
		}
	}
	return
}

View File

@@ -1,872 +0,0 @@
#!/bin/sh
set -e
set -o noglob
# Usage:
# curl ... | ENV_VAR=... sh -
# or
# ENV_VAR=... ./install.sh
#
# Example:
# Installing a server without traefik:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -
# Installing an agent to point at a server:
# curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -
#
# Environment variables:
# - K3S_*
# Environment variables which begin with K3S_ will be preserved for the
# systemd service to use. Setting K3S_URL without explicitly setting
# a systemd exec command will default the command to "agent", and we
# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set.
#
# - INSTALL_K3S_SKIP_DOWNLOAD
# If set to true will not download k3s hash or binary.
#
# - INSTALL_K3S_FORCE_RESTART
# If set to true will always restart the K3s service
#
# - INSTALL_K3S_SYMLINK
# If set to 'skip' will not create symlinks, 'force' will overwrite,
# default will symlink if command does not exist in path.
#
# - INSTALL_K3S_SKIP_ENABLE
# If set to true will not enable or start k3s service.
#
# - INSTALL_K3S_SKIP_START
# If set to true will not start k3s service.
#
# - INSTALL_K3S_VERSION
# Version of k3s to download from github. Will attempt to download from the
# stable channel if not specified.
#
# - INSTALL_K3S_COMMIT
# Commit of k3s to download from temporary cloud storage.
# * (for developer & QA use)
#
# - INSTALL_K3S_BIN_DIR
# Directory to install k3s binary, links, and uninstall script to, or use
# /usr/local/bin as the default
#
# - INSTALL_K3S_BIN_DIR_READ_ONLY
# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces
# setting INSTALL_K3S_SKIP_DOWNLOAD=true
#
# - INSTALL_K3S_SYSTEMD_DIR
# Directory to install systemd service and environment files to, or use
# /etc/systemd/system as the default
#
# - INSTALL_K3S_EXEC or script arguments
# Command with flags to use for launching k3s in the systemd service, if
# the command is not specified will default to "agent" if K3S_URL is set
# or "server" if not. The final systemd command resolves to a combination
# of EXEC and script args ($@).
#
# The following commands result in the same behavior:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik
# curl ... | sh -s - server --disable=traefik
# curl ... | sh -s - --disable=traefik
#
# - INSTALL_K3S_NAME
# Name of systemd service to create, will default from the k3s exec command
# if not specified. If specified the name will be prefixed with 'k3s-'.
#
# - INSTALL_K3S_TYPE
# Type of systemd service to create, will default from the k3s exec command
# if not specified.
#
# - INSTALL_K3S_SELINUX_WARN
# If set to true will continue if k3s-selinux policy is not found.
#
# - INSTALL_K3S_SKIP_SELINUX_RPM
# If set to true will skip automatic installation of the k3s RPM.
#
# - INSTALL_K3S_CHANNEL_URL
# Channel URL for fetching k3s download URL.
# Defaults to 'https://update.k3s.io/v1-release/channels'.
#
# - INSTALL_K3S_CHANNEL
# Channel to use for fetching k3s download URL.
# Defaults to 'stable'.
# Download roots: GitHub for releases, GCS bucket for CI commit builds.
GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
# Set to 'curl' or 'wget' by verify_downloader.
DOWNLOADER=

# --- helper functions for logs ---
info()
{
    echo '[INFO] ' "$@"
}
warn()
{
    echo '[WARN] ' "$@" >&2
}
# fatal logs to stderr and aborts the install.
fatal()
{
    echo '[ERROR] ' "$@" >&2
    exit 1
}
# --- fatal if no systemd or openrc ---
# Sets HAS_OPENRC or HAS_SYSTEMD for later service-file decisions.
verify_system() {
    if [ -x /sbin/openrc-run ]; then
        HAS_OPENRC=true
        return
    fi
    if [ -d /run/systemd ]; then
        HAS_SYSTEMD=true
        return
    fi
    fatal 'Can not find systemd or openrc to use as a process supervisor for k3s'
}

# --- add quotes to command arguments ---
# Emits each argument single-quoted on its own line, escaping embedded quotes.
quote() {
    for arg in "$@"; do
        printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/"
    done
}
# --- add indentation and trailing slash to quoted args ---
# Used to build multi-line ExecStart arguments for the unit file.
quote_indent() {
    printf ' \\\n'
    for arg in "$@"; do
        printf '\t%s \\\n' "$(quote "$arg")"
    done
}

# --- escape most punctuation characters, except quotes, forward slash, and space ---
escape() {
    printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;'
}

# --- escape double quotes ---
escape_dq() {
    printf '%s' "$@" | sed -e 's/"/\\"/g'
}

# --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise ---
verify_k3s_url() {
    case "${K3S_URL}" in
        "")
            ;;
        https://*)
            ;;
        *)
            fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})"
            ;;
    esac
}
# --- define needed environment variables ---
# Derives CMD_K3S (server/agent), SYSTEM_NAME, SUDO, BIN_DIR, SYSTEMD_DIR,
# service/env file paths and channel settings from the CLI args and
# INSTALL_K3S_* environment; everything is exported as globals used by the
# rest of the script.
setup_env() {
    # --- use command args if passed or create default ---
    case "$1" in
        # --- if we only have flags discover if command should be server or agent ---
        (-*|"")
            if [ -z "${K3S_URL}" ]; then
                CMD_K3S=server
            else
                if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then
                    fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined."
                fi
                CMD_K3S=agent
            fi
        ;;
        # --- command is provided ---
        (*)
            CMD_K3S=$1
            shift
        ;;
    esac

    verify_k3s_url

    CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")"

    # --- use systemd name if defined or create default ---
    if [ -n "${INSTALL_K3S_NAME}" ]; then
        SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEM_NAME=k3s
        else
            SYSTEM_NAME=k3s-${CMD_K3S}
        fi
    fi

    # --- check for invalid characters in system name ---
    valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' )
    if [ "${SYSTEM_NAME}" != "${valid_chars}"  ]; then
        # Point a caret at each offending character in the error output.
        invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g')
        fatal "Invalid characters for system name:
            ${SYSTEM_NAME}
            ${invalid_chars}"
    fi

    # --- use sudo if we are not already root ---
    SUDO=sudo
    if [ $(id -u) -eq 0 ]; then
        SUDO=
    fi

    # --- use systemd type if defined or create default ---
    if [ -n "${INSTALL_K3S_TYPE}" ]; then
        SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEMD_TYPE=notify
        else
            SYSTEMD_TYPE=exec
        fi
    fi

    # --- use binary install directory if defined or create default ---
    if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then
        BIN_DIR=${INSTALL_K3S_BIN_DIR}
    else
        # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists
        BIN_DIR=/usr/local/bin
        if ! $SUDO sh -c "touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test"; then
            if [ -d /opt/bin ]; then
                BIN_DIR=/opt/bin
            fi
        fi
    fi

    # --- use systemd directory if defined or create default ---
    if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then
        SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}"
    else
        SYSTEMD_DIR=/etc/systemd/system
    fi

    # --- set related files from system name ---
    SERVICE_K3S=${SYSTEM_NAME}.service
    UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
    KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}

    # --- use service or environment location depending on systemd/openrc ---
    if [ "${HAS_SYSTEMD}" = true ]; then
        FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
        FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env
    elif [ "${HAS_OPENRC}" = true ]; then
        $SUDO mkdir -p /etc/rancher/k3s
        FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}
        FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env
    fi

    # --- get hash of config & exec for currently installed k3s ---
    PRE_INSTALL_HASHES=$(get_installed_hashes)

    # --- if bin directory is read only skip download ---
    if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then
        INSTALL_K3S_SKIP_DOWNLOAD=true
    fi

    # --- setup channel values
    INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}
    INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}
}
# --- check if skip download environment variable set ---
# Returns 0 (skip) only when INSTALL_K3S_SKIP_DOWNLOAD=true.
can_skip_download() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then
        return 1
    fi
}

# --- verify an executable k3s binary is installed ---
verify_k3s_is_executable() {
    if [ ! -x ${BIN_DIR}/k3s ]; then
        fatal "Executable k3s binary not found at ${BIN_DIR}/k3s"
    fi
}

# --- set arch and suffix, fatal if architecture not supported ---
# SUFFIX distinguishes the per-arch artifact names (empty for amd64).
setup_verify_arch() {
    if [ -z "$ARCH" ]; then
        ARCH=$(uname -m)
    fi
    case $ARCH in
        amd64)
            ARCH=amd64
            SUFFIX=
            ;;
        x86_64)
            ARCH=amd64
            SUFFIX=
            ;;
        arm64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        aarch64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        arm*)
            ARCH=arm
            SUFFIX=-${ARCH}hf
            ;;
        *)
            fatal "Unsupported architecture $ARCH"
    esac
}
# --- verify existence of network downloader executable ---
verify_downloader() {
    # Return failure if it doesn't exist or is no executable
    [ -x "$(command -v $1)" ] || return 1

    # Set verified executable as our downloader program and return success
    DOWNLOADER=$1
    return 0
}

# --- create temporary directory and cleanup when done ---
# The EXIT/INT trap ensures the temp dir is removed even on failure, while
# preserving the original exit code.
setup_tmp() {
    TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
    TMP_HASH=${TMP_DIR}/k3s.hash
    TMP_BIN=${TMP_DIR}/k3s.bin
    cleanup() {
        code=$?
        set +e
        trap - EXIT
        rm -rf ${TMP_DIR}
        exit $code
    }
    trap cleanup INT EXIT
}
# --- use desired k3s version if defined or find version from channel ---
# Resolves VERSION_K3S from INSTALL_K3S_COMMIT, INSTALL_K3S_VERSION, or by
# following the channel server's redirect.
get_release_version() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
    elif [ -n "${INSTALL_K3S_VERSION}" ]; then
        VERSION_K3S=${INSTALL_K3S_VERSION}
    else
        info "Finding release for channel ${INSTALL_K3S_CHANNEL}"
        version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}"
        # The channel server redirects to the release; the version is the
        # final path segment of the effective URL.
        case $DOWNLOADER in
            curl)
                VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')
                ;;
            wget)
                VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')
                ;;
            *)
                fatal "Incorrect downloader executable '$DOWNLOADER'"
                ;;
        esac
    fi
    info "Using ${VERSION_K3S} as release"
}

# --- download from github url ---
download() {
    [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'

    case $DOWNLOADER in
        curl)
            curl -o $1 -sfL $2
            ;;
        wget)
            wget -qO $1 $2
            ;;
        *)
            fatal "Incorrect executable '$DOWNLOADER'"
            ;;
    esac

    # Abort if download command failed
    [ $? -eq 0 ] || fatal 'Download failed'
}

# --- download hash from github url ---
# Extracts the expected sha256 for this arch's binary into HASH_EXPECTED.
download_hash() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
    else
        HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt
    fi
    info "Downloading hash ${HASH_URL}"
    download ${TMP_HASH} ${HASH_URL}
    HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
    HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
}
# --- check hash against installed version ---
# Success means the currently installed binary already matches HASH_EXPECTED,
# so the download can be skipped.
installed_hash_matches() {
    if [ -x ${BIN_DIR}/k3s ]; then
        HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s)
        HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*}
        if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then
            return
        fi
    fi
    return 1
}

# --- download binary from github url ---
download_binary() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
    else
        BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
    fi
    info "Downloading binary ${BIN_URL}"
    download ${TMP_BIN} ${BIN_URL}
}

# --- verify downloaded binary hash ---
verify_binary() {
    info "Verifying binary download"
    HASH_BIN=$(sha256sum ${TMP_BIN})
    HASH_BIN=${HASH_BIN%%[[:blank:]]*}
    if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then
        fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}"
    fi
}

# --- setup permissions and move binary to system directory ---
setup_binary() {
    chmod 755 ${TMP_BIN}
    info "Installing k3s to ${BIN_DIR}/k3s"
    $SUDO chown root:root ${TMP_BIN}
    $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s
}
# --- setup selinux policy ---
# Picks the RPM channel from the k3s channel, optionally installs the
# k3s-selinux RPM, then labels the binary; failures are fatal unless the
# warn-only conditions below apply.
setup_selinux() {
    case ${INSTALL_K3S_CHANNEL} in
        *testing)
            rpm_channel=testing
            ;;
        *latest)
            rpm_channel=latest
            ;;
        *)
            rpm_channel=stable
            ;;
    esac

    rpm_site="rpm.rancher.io"
    if [ "${rpm_channel}" = "testing" ]; then
        rpm_site="rpm-testing.rancher.io"
    fi

    policy_hint="please install:
    yum install -y container-selinux selinux-policy-base
    yum install -y https://${rpm_site}/k3s/${rpm_channel}/common/centos/7/noarch/k3s-selinux-0.2-1.el7_8.noarch.rpm
"

    # Downgrade policy failures to warnings when explicitly requested or on
    # Flatcar (no package manager).
    policy_error=fatal
    if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || grep -q 'ID=flatcar' /etc/os-release; then
        policy_error=warn
    fi

    if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download; then
        info "Skipping installation of SELinux RPM"
    else
        install_selinux_rpm ${rpm_site} ${rpm_channel}
    fi

    if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then
        if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then
            $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
        fi
    else
        if [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
            $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
        fi
    fi
}
# --- if on an el7/el8 system, install k3s-selinux
# $1 = rpm site hostname, $2 = rpm channel. The heredoc below is the written
# repo file; its content must stay verbatim.
install_selinux_rpm() {
    if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ]; then
        dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
        maj_ver=$(echo "$dist_version" | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
        # Drop any previously written repo definitions before re-adding.
        set +o noglob
        $SUDO rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
        set -o noglob
        if [ -r /etc/redhat-release ]; then
            case ${maj_ver} in
                7)
                    $SUDO yum -y install yum-utils
                    $SUDO yum-config-manager --enable rhel-7-server-extras-rpms
                    ;;
                8)
                    :
                    ;;
                *)
                    return
                    ;;
            esac
        fi
        $SUDO tee /etc/yum.repos.d/rancher-k3s-common.repo >/dev/null << EOF
[rancher-k3s-common-${2}]
name=Rancher K3s Common (${2})
baseurl=https://${1}/k3s/${2}/common/centos/${maj_ver}/noarch
enabled=1
gpgcheck=1
gpgkey=https://${1}/public.key
EOF
        $SUDO yum -y install "k3s-selinux"
    fi
    return
}
# --- download and verify k3s ---
# Orchestrates the full fetch: arch detection, downloader choice, version
# resolution, hash check (skipping re-download when unchanged), and install.
download_and_verify() {
    if can_skip_download; then
       info 'Skipping k3s download and verify'
       verify_k3s_is_executable
       return
    fi

    setup_verify_arch
    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
    setup_tmp
    get_release_version
    download_hash

    if installed_hash_matches; then
        info 'Skipping binary downloaded, installed k3s matches hash'
        return
    fi

    download_binary
    verify_binary
    setup_binary
}

# --- add additional utility links ---
# Symlinks kubectl/crictl/ctr to the multi-call k3s binary unless the command
# already exists in PATH (or INSTALL_K3S_SYMLINK overrides the behavior).
create_symlinks() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    [ "${INSTALL_K3S_SYMLINK}" = skip ] && return

    for cmd in kubectl crictl ctr; do
        if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
            which_cmd=$(command -v ${cmd} 2>/dev/null || true)
            if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
                info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
                $SUDO ln -sf k3s ${BIN_DIR}/${cmd}
            else
                info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
            fi
        else
            info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists"
        fi
    done
}
# --- create killall script ---
# Writes the generated k3s-killall.sh. The heredoc is the *emitted script's*
# content ('\EOF' disables expansion) and must stay verbatim.
create_killall() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    info "Creating killall script ${KILLALL_K3S_SH}"
    $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
#!/bin/sh
[ $(id -u) -eq 0 ] || exec sudo $0 $@

for bin in /var/lib/rancher/k3s/data/**/bin/; do
    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done

set -x

for service in /etc/systemd/system/k3s*.service; do
    [ -s $service ] && systemctl stop $(basename $service)
done

for service in /etc/init.d/k3s*; do
    [ -x $service ] && $service stop
done

pschildren() {
    ps -e -o ppid= -o pid= | \
    sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
    grep -w "^$1" | \
    cut -f2
}

pstree() {
    for pid in $@; do
        echo $pid
        for child in $(pschildren $pid); do
            pstree $child
        done
    done
}

killtree() {
    kill -9 $(
        { set +x; } 2>/dev/null;
        pstree $@;
        set -x;
    ) 2>/dev/null
}

getshims() {
    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}

killtree $({ set +x; } 2>/dev/null; getshims; set -x)

do_unmount_and_remove() {
    awk -v path="$1" '$2 ~ ("^" path) { print $2 }' /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
}

do_unmount_and_remove '/run/k3s'
do_unmount_and_remove '/var/lib/rancher/k3s'
do_unmount_and_remove '/var/lib/kubelet/pods'
do_unmount_and_remove '/var/lib/kubelet/plugins'
do_unmount_and_remove '/run/netns/cni-'

# Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete

# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
    iface=${iface%%@*}
    [ -z "$iface" ] || ip link delete $iface
done
ip link delete cni0
ip link delete flannel.1
rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
EOF
    $SUDO chmod 755 ${KILLALL_K3S_SH}
    $SUDO chown root:root ${KILLALL_K3S_SH}
}
# --- create uninstall script ---
# Writes ${UNINSTALL_K3S_SH}, which runs the killall script, disables the
# service, and removes installed files. The heredoc delimiter is NOT
# quoted, so ${...} variables are expanded at install time; \$-escaped
# expressions are deferred to when the uninstall script actually runs.
create_uninstall() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
info "Creating uninstall script ${UNINSTALL_K3S_SH}"
$SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF
#!/bin/sh
set -x
[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@
${KILLALL_K3S_SH}
if command -v systemctl; then
systemctl disable ${SYSTEM_NAME}
systemctl reset-failed ${SYSTEM_NAME}
systemctl daemon-reload
fi
if command -v rc-update; then
rc-update delete ${SYSTEM_NAME} default
fi
rm -f ${FILE_K3S_SERVICE}
rm -f ${FILE_K3S_ENV}
remove_uninstall() {
rm -f ${UNINSTALL_K3S_SH}
}
trap remove_uninstall EXIT
if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then
set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x
exit
fi
for cmd in kubectl crictl ctr; do
if [ -L ${BIN_DIR}/\$cmd ]; then
rm -f ${BIN_DIR}/\$cmd
fi
done
rm -rf /etc/rancher/k3s
rm -rf /run/k3s
rm -rf /run/flannel
rm -rf /var/lib/rancher/k3s
rm -rf /var/lib/kubelet
rm -f ${BIN_DIR}/k3s
rm -f ${KILLALL_K3S_SH}
if type yum >/dev/null 2>&1; then
yum remove -y k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
fi
EOF
# root-owned and world-executable, same as the killall script
$SUDO chmod 755 ${UNINSTALL_K3S_SH}
$SUDO chown root:root ${UNINSTALL_K3S_SH}
}
# --- disable current service if loaded --
# Best-effort cleanup of any previously installed unit: every step
# tolerates a missing service or file via `|| true`.
systemd_disable() {
    $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true
    for stale_unit_file in ${SERVICE_K3S} ${SERVICE_K3S}.env; do
        $SUDO rm -f /etc/systemd/system/${stale_unit_file} || true
    done
}
# --- capture current env and create file containing k3s_ variables ---
# Snapshots all K3S_* and proxy-related environment variables into
# ${FILE_K3S_ENV} for the service to read at start.
create_env_file() {
info "env: Creating environment file ${FILE_K3S_ENV}"
$SUDO touch ${FILE_K3S_ENV}
# chmod before writing so potentially sensitive values (tokens, proxy
# credentials) are never world-readable, even briefly
$SUDO chmod 0600 ${FILE_K3S_ENV}
# first tee truncates; second appends the proxy vars (case-insensitive match)
env | grep '^K3S_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null
env | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null
}
# --- write systemd service file ---
# Renders the k3s unit to ${FILE_K3S_SERVICE}. The heredoc is unquoted, so
# ${...} values are expanded now; %N is a systemd specifier left for
# systemd to resolve at runtime. The leading '-' on EnvironmentFile lines
# tells systemd to ignore files that do not exist.
create_systemd_service_file() {
info "systemd: Creating service file ${FILE_K3S_SERVICE}"
$SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target
[Install]
WantedBy=multi-user.target
[Service]
Type=${SYSTEMD_TYPE}
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-${FILE_K3S_ENV}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\
${CMD_K3S_EXEC}
EOF
}
# --- write openrc service file ---
# Renders an openrc service script to ${FILE_K3S_SERVICE} (supervised by
# supervise-daemon) plus a logrotate config for its log file.
create_openrc_service_file() {
LOG_FILE=/var/log/${SYSTEM_NAME}.log
info "openrc: Creating service file ${FILE_K3S_SERVICE}"
$SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
#!/sbin/openrc-run
depend() {
after network-online
want cgroups
}
start_pre() {
rm -f /tmp/k3s.*
}
supervisor=supervise-daemon
name=${SYSTEM_NAME}
command="${BIN_DIR}/k3s"
command_args="$(escape_dq "${CMD_K3S_EXEC}")
>>${LOG_FILE} 2>&1"
output_log=${LOG_FILE}
error_log=${LOG_FILE}
pidfile="/var/run/${SYSTEM_NAME}.pid"
respawn_delay=5
respawn_max=0
set -o allexport
if [ -f /etc/environment ]; then source /etc/environment; fi
if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi
set +o allexport
EOF
# the service script must be executable for openrc to run it
$SUDO chmod 0755 ${FILE_K3S_SERVICE}
$SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF
${LOG_FILE} {
missingok
notifempty
copytruncate
}
EOF
}
# --- write systemd or openrc service file ---
# Dispatches to the writer matching the detected init system; always
# succeeds so a `set -e` caller is not tripped when neither flag is set.
create_service_file() {
    if [ "${HAS_SYSTEMD}" = true ]; then
        create_systemd_service_file
    fi
    if [ "${HAS_OPENRC}" = true ]; then
        create_openrc_service_file
    fi
    return 0
}
# --- get hashes of the current k3s bin and service files
# Used before/after install to decide whether a restart is needed.
# 2>&1 folds "no such file" errors into the output so missing files still
# yield a stable, comparable string; `|| true` keeps errexit from firing.
get_installed_hashes() {
$SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
}
# --- enable and start systemd service ---
# Enabling by unit-file path links the file into systemd's unit search
# path in addition to enabling it.
systemd_enable() {
info "systemd: Enabling ${SYSTEM_NAME} unit"
$SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null
$SUDO systemctl daemon-reload >/dev/null
}
# `restart` (rather than `start`) picks up a new binary if the service
# was already running from a previous install.
systemd_start() {
info "systemd: Starting ${SYSTEM_NAME}"
$SUDO systemctl restart ${SYSTEM_NAME}
}
# --- enable and start openrc service ---
openrc_enable() {
info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel"
$SUDO rc-update add ${SYSTEM_NAME} default >/dev/null
}
# invokes the generated openrc-run script directly with `restart`
openrc_start() {
info "openrc: Starting ${SYSTEM_NAME}"
$SUDO ${FILE_K3S_SERVICE} restart
}
# --- startup systemd or openrc service ---
# Enables the service (unless skipped) and starts it, but only when the
# installed files actually changed since the pre-install hash snapshot,
# or a restart is explicitly forced via INSTALL_K3S_FORCE_RESTART.
service_enable_and_start() {
[ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return
[ "${HAS_SYSTEMD}" = true ] && systemd_enable
[ "${HAS_OPENRC}" = true ] && openrc_enable
[ "${INSTALL_K3S_SKIP_START}" = true ] && return
# compare against PRE_INSTALL_HASHES captured earlier in the install
POST_INSTALL_HASHES=$(get_installed_hashes)
if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_K3S_FORCE_RESTART}" != true ]; then
info 'No change detected so skipping service start'
return
fi
[ "${HAS_SYSTEMD}" = true ] && systemd_start
[ "${HAS_OPENRC}" = true ] && openrc_start
return 0
}
# --- re-evaluate args to include env command ---
eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")
# --- run the install process --
# NOTE(review): the brace group forces the whole block to be read before
# any of it executes — presumably a guard against running a partially
# downloaded script when piped from curl; confirm against upstream intent.
{
verify_system
setup_env "$@"
download_and_verify
setup_selinux
create_symlinks
create_killall
create_uninstall
systemd_disable
create_env_file
create_service_file
service_enable_and_start
}

View File

@@ -1,507 +0,0 @@
#!/bin/sh
set -e
if [ "${DEBUG}" = 1 ]; then
set -x
fi
# Usage:
# curl ... | ENV_VAR=... sh -
# or
# ENV_VAR=... ./install.sh
#
# Environment variables:
#
# - INSTALL_RKE2_CHANNEL
# Channel to use for fetching rke2 download URL.
# Defaults to 'latest'.
#
# - INSTALL_RKE2_METHOD
# The installation method to use.
# Default for RPM-based systems is "rpm", all others "tar".
#
# - INSTALL_RKE2_TYPE
# Type of rke2 service. Can be either "server" or "agent".
# Default is "server".
#
# - INSTALL_RKE2_EXEC
# This is an alias for INSTALL_RKE2_TYPE, included for compatibility with K3s.
# If both are set, INSTALL_RKE2_TYPE is preferred.
#
# - INSTALL_RKE2_VERSION
# Version of rke2 to download from github.
#
# - INSTALL_RKE2_RPM_RELEASE_VERSION
# Version of the rke2 RPM release to install.
# Format would be like "1.el7" or "2.el8"
#
# - INSTALL_RKE2_TAR_PREFIX
# Installation prefix when using the tar installation method.
# Default is /usr/local, unless /usr/local is read-only or has a dedicated mount point,
# in which case /opt/rke2 is used instead.
#
# - INSTALL_RKE2_COMMIT
# Commit of RKE2 to download from temporary cloud storage.
# If set, this forces INSTALL_RKE2_METHOD=tar.
# * (for developer & QA use)
#
# - INSTALL_RKE2_AGENT_IMAGES_DIR
# Installation path for airgap images when installing from CI commit
# Default is /var/lib/rancher/rke2/agent/images
#
# - INSTALL_RKE2_ARTIFACT_PATH
# If set, the install script will use the local path for sourcing the rke2.linux-$SUFFIX and sha256sum-$ARCH.txt files
# rather than downloading the files from the internet.
# Default is not set.
#
# info logs the given argument at info log level.
info() {
    echo "[INFO] " "$@"
}
# warn logs the given argument at warn log level (to stderr).
warn() {
    echo "[WARN] " "$@" >&2
}
# fatal logs the given argument at fatal log level and exits with 1.
# When SUFFIX is already known, it also prints a manual-download hint.
fatal() {
    echo "[ERROR] " "$@" >&2
    if [ -n "${SUFFIX}" ]; then
        echo "[ALT] Please visit 'https://github.com/rancher/rke2/releases' directly and download the latest rke2.${SUFFIX}.tar.gz" >&2
    fi
    exit 1
}
# check_target_mountpoint return success if the target directory is on a dedicated mount point
check_target_mountpoint() {
    mountpoint -q "${INSTALL_RKE2_TAR_PREFIX}"
}
# check_target_ro returns success if the target directory is read-only
# (probe: try to create and remove a scratch file there).
# NOTE(review): this relies on being called from an `if`/`||` condition
# context, where `set -e` is suspended, so a failed touch does not abort.
check_target_ro() {
    touch "${INSTALL_RKE2_TAR_PREFIX}"/.rke2-ro-test && rm -rf "${INSTALL_RKE2_TAR_PREFIX}"/.rke2-ro-test
    test $? -ne 0
}
# setup_env defines needed environment variables.
# Defaults: channel "stable", type "server" (INSTALL_RKE2_EXEC fallback),
# method "yum" on RPM systems, tar prefix /usr/local (or /opt/rke2 when
# /usr/local is read-only or a dedicated mount point).
setup_env() {
    STORAGE_URL="https://storage.googleapis.com/rke2-ci-builds"
    INSTALL_RKE2_GITHUB_URL="https://github.com/rancher/rke2"
    DEFAULT_TAR_PREFIX="/usr/local"
    # --- bail if we are not root ---
    if [ ! $(id -u) -eq 0 ]; then
        fatal "You need to be root to perform this install"
    fi
    # --- make sure install channel has a value
    if [ -z "${INSTALL_RKE2_CHANNEL}" ]; then
        INSTALL_RKE2_CHANNEL="stable"
    fi
    # --- make sure install type has a value
    if [ -z "${INSTALL_RKE2_TYPE}" ]; then
        INSTALL_RKE2_TYPE="${INSTALL_RKE2_EXEC:-server}"
    fi
    # --- use yum install method if available by default
    if [ -z "${INSTALL_RKE2_ARTIFACT_PATH}" ] && [ -z "${INSTALL_RKE2_COMMIT}" ] && [ -z "${INSTALL_RKE2_METHOD}" ] && command -v yum >/dev/null 2>&1; then
        INSTALL_RKE2_METHOD="yum"
    fi
    # --- install tarball to /usr/local by default, except if /usr/local is on a separate partition or is read-only
    # --- in which case we go into /opt/rke2.
    if [ -z "${INSTALL_RKE2_TAR_PREFIX}" ]; then
        INSTALL_RKE2_TAR_PREFIX=${DEFAULT_TAR_PREFIX}
        if check_target_mountpoint || check_target_ro; then
            INSTALL_RKE2_TAR_PREFIX="/opt/rke2"
            warn "${DEFAULT_TAR_PREFIX} is read-only or a mount point; installing to ${INSTALL_RKE2_TAR_PREFIX}"
        fi
    fi
    if [ -z "${INSTALL_RKE2_AGENT_IMAGES_DIR}" ]; then
        INSTALL_RKE2_AGENT_IMAGES_DIR="/var/lib/rancher/rke2/agent/images"
    fi
}
# check_method_conflict will exit with an error if the user attempts to install
# via tar method on a host with existing RPMs. RPM-based methods are always
# allowed; for anything else an installed rke2-common package is fatal.
check_method_conflict() {
    case ${INSTALL_RKE2_METHOD} in
    yum | rpm | dnf)
        return
        ;;
    *)
        if rpm -q rke2-common >/dev/null 2>&1; then
            fatal "Cannot perform ${INSTALL_RKE2_METHOD:-tar} install on host with existing RKE2 RPMs - please run rke2-uninstall.sh first"
        fi
        ;;
    esac
}
# setup_arch sets ARCH and SUFFIX (used in artifact file names, e.g.
# "linux-amd64"), defaulting ARCH from `uname -m`.
# Fatal if the architecture is not supported.
setup_arch() {
    case ${ARCH:=$(uname -m)} in
    # "amd64" and "x86_64" had two identical case arms; merged into one
    amd64 | x86_64)
        ARCH=amd64
        SUFFIX=$(uname -s | tr '[:upper:]' '[:lower:]')-${ARCH}
        ;;
    *)
        fatal "unsupported architecture ${ARCH}"
        ;;
    esac
}
# verify_downloader verifies that the named command exists in PATH and is
# executable; on success it records the resolved path in DOWNLOADER and
# returns 0, otherwise returns 1.
verify_downloader() {
    cmd="$(command -v "${1}")"
    if [ -z "${cmd}" ] || [ ! -x "${cmd}" ]; then
        return 1
    fi
    # Remember the verified executable for later download calls
    DOWNLOADER=${cmd}
    return 0
}
# setup_tmp creates a temporary directory
# and cleans up when done. The EXIT/INT trap removes the directory while
# preserving the original exit code; `trap - EXIT` prevents cleanup from
# re-running when it was entered via INT.
setup_tmp() {
    TMP_DIR=$(mktemp -d -t rke2-install.XXXXXXXXXX)
    TMP_CHECKSUMS=${TMP_DIR}/rke2.checksums
    TMP_TARBALL=${TMP_DIR}/rke2.tarball
    TMP_AIRGAP_CHECKSUMS=${TMP_DIR}/rke2-images.checksums
    TMP_AIRGAP_TARBALL=${TMP_DIR}/rke2-images.tarball
    cleanup() {
        code=$?
        set +e
        trap - EXIT
        rm -rf "${TMP_DIR}"
        exit $code
    }
    trap cleanup INT EXIT
}
# --- use desired rke2 version if defined or find version from channel ---
# Resolution order: explicit commit > explicit version > channel lookup.
# The channel server answers with a redirect whose final URL ends in the
# release tag: curl reads the effective URL, wget parses the Location header.
get_release_version() {
    if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
        version="commit ${INSTALL_RKE2_COMMIT}"
    elif [ -n "${INSTALL_RKE2_VERSION}" ]; then
        version=${INSTALL_RKE2_VERSION}
    else
        info "finding release for channel ${INSTALL_RKE2_CHANNEL}"
        INSTALL_RKE2_CHANNEL_URL=${INSTALL_RKE2_CHANNEL_URL:-'https://update.rke2.io/v1-release/channels'}
        version_url="${INSTALL_RKE2_CHANNEL_URL}/${INSTALL_RKE2_CHANNEL}"
        case ${DOWNLOADER} in
        *curl)
            version=$(${DOWNLOADER} -w "%{url_effective}" -L -s -S "${version_url}" -o /dev/null | sed -e 's|.*/||')
            ;;
        *wget)
            version=$(${DOWNLOADER} -SqO /dev/null "${version_url}" 2>&1 | grep -i Location | sed -e 's|.*/||')
            ;;
        *)
            fatal "Unsupported downloader executable '${DOWNLOADER}'"
            ;;
        esac
        INSTALL_RKE2_VERSION="${version}"
    fi
}
# check_download performs a HEAD request to see if a file exists at a given url.
# Returns the downloader's exit status (non-zero when the URL is absent).
check_download() {
    case ${DOWNLOADER} in
    *curl)
        # -I/--head already issues a HEAD request; the original also passed
        # "-X HEAD", which the curl manual warns against (redundant, and can
        # make curl wait for a body that never arrives), so it was removed.
        curl -o "/dev/null" -fsLI "$1"
        ;;
    *wget)
        wget -q --spider "$1"
        ;;
    *)
        fatal "downloader executable not supported: '${DOWNLOADER}'"
        ;;
    esac
}
# download downloads a file from a url using either curl or wget.
# $1 = destination path, $2 = source URL.
download() {
    if [ $# -ne 2 ]; then
        fatal "download needs exactly 2 arguments"
    fi
    case ${DOWNLOADER} in
    *curl)
        curl -o "$1" -fsSL "$2"
        ;;
    *wget)
        wget -qO "$1" "$2"
        ;;
    *)
        fatal "downloader executable not supported: '${DOWNLOADER}'"
        ;;
    esac
    # Abort if download command failed
    # NOTE(review): the script runs with `set -e` (top of file), so a failed
    # curl/wget normally aborts before this check is reached; it only takes
    # effect if download is ever invoked in a condition context.
    if [ $? -ne 0 ]; then
        fatal "download failed"
    fi
}
# download_checksums downloads hash from github url.
# CI-commit installs read from cloud storage; releases from GitHub.
# Sets CHECKSUM_EXPECTED to the sha256 for the rke2 tarball.
download_checksums() {
    if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
        CHECKSUMS_URL=${STORAGE_URL}/rke2.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz.sha256sum
    else
        CHECKSUMS_URL=${INSTALL_RKE2_GITHUB_URL}/releases/download/${INSTALL_RKE2_VERSION}/sha256sum-${ARCH}.txt
    fi
    info "downloading checksums at ${CHECKSUMS_URL}"
    download "${TMP_CHECKSUMS}" "${CHECKSUMS_URL}"
    CHECKSUM_EXPECTED=$(grep "rke2.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
}
# download_tarball downloads binary from github url (or cloud storage for
# CI-commit installs) into the staging tmp dir.
download_tarball() {
    if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
        TARBALL_URL=${STORAGE_URL}/rke2.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz
    else
        TARBALL_URL=${INSTALL_RKE2_GITHUB_URL}/releases/download/${INSTALL_RKE2_VERSION}/rke2.${SUFFIX}.tar.gz
    fi
    info "downloading tarball at ${TARBALL_URL}"
    download "${TMP_TARBALL}" "${TARBALL_URL}"
}
# stage_local_checksums stages the local checksum hash for validation.
# Also records the expected airgap-image checksum if a matching image
# tarball (zst preferred, else gz) exists alongside the artifacts.
stage_local_checksums() {
    info "staging local checksums from ${INSTALL_RKE2_ARTIFACT_PATH}/sha256sum-${ARCH}.txt"
    cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/sha256sum-${ARCH}.txt" "${TMP_CHECKSUMS}"
    CHECKSUM_EXPECTED=$(grep "rke2.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
    if [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" ]; then
        AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar.zst" "${TMP_CHECKSUMS}" | awk '{print $1}')
    elif [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" ]; then
        AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
    fi
}
# stage_local_tarball stages the local tarball.
stage_local_tarball() {
    info "staging tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2.${SUFFIX}.tar.gz"
    cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2.${SUFFIX}.tar.gz" "${TMP_TARBALL}"
}
# stage_local_airgap_tarball stages the local airgap image tarball,
# preferring zst over gz, and records the format for install_airgap_tarball.
stage_local_airgap_tarball() {
    if [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" ]; then
        info "staging zst airgap image tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst"
        cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" "${TMP_AIRGAP_TARBALL}"
        AIRGAP_TARBALL_FORMAT=zst
    elif [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" ]; then
        info "staging gzip airgap image tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz"
        cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" "${TMP_AIRGAP_TARBALL}"
        AIRGAP_TARBALL_FORMAT=gz
    fi
}
# verify_tarball verifies the downloaded installer checksum.
verify_tarball() {
    info "verifying tarball"
    CHECKSUM_ACTUAL=$(sha256sum "${TMP_TARBALL}" | awk '{print $1}')
    if [ "${CHECKSUM_EXPECTED}" != "${CHECKSUM_ACTUAL}" ]; then
        fatal "download sha256 does not match ${CHECKSUM_EXPECTED}, got ${CHECKSUM_ACTUAL}"
    fi
}
# unpack_tarball extracts the tarball, correcting paths and moving systemd units as necessary.
# When installing somewhere other than the default prefix, the unit files
# and uninstall script are rewritten in place to reference the real prefix.
unpack_tarball() {
    info "unpacking tarball file to ${INSTALL_RKE2_TAR_PREFIX}"
    mkdir -p ${INSTALL_RKE2_TAR_PREFIX}
    tar xzf "${TMP_TARBALL}" -C "${INSTALL_RKE2_TAR_PREFIX}"
    if [ "${INSTALL_RKE2_TAR_PREFIX}" != "${DEFAULT_TAR_PREFIX}" ]; then
        info "updating tarball contents to reflect install path"
        sed -i "s|${DEFAULT_TAR_PREFIX}|${INSTALL_RKE2_TAR_PREFIX}|" ${INSTALL_RKE2_TAR_PREFIX}/lib/systemd/system/rke2-*.service ${INSTALL_RKE2_TAR_PREFIX}/bin/rke2-uninstall.sh
        info "moving systemd units to /etc/systemd/system"
        mv -f ${INSTALL_RKE2_TAR_PREFIX}/lib/systemd/system/rke2-*.service /etc/systemd/system/
        info "install complete; you may want to run: export PATH=\$PATH:${INSTALL_RKE2_TAR_PREFIX}/bin"
    fi
}
# download_airgap_checksums downloads the checksum file for the airgap image tarball
# and prepares the checksum value for later validation.
# Only applies to CI-commit installs (no-op otherwise).
download_airgap_checksums() {
    if [ -z "${INSTALL_RKE2_COMMIT}" ]; then
        return
    fi
    AIRGAP_CHECKSUMS_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.zst.sha256sum
    # try for zst first; if that fails use gz for older release branches
    if ! check_download "${AIRGAP_CHECKSUMS_URL}"; then
        AIRGAP_CHECKSUMS_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz.sha256sum
    fi
    info "downloading airgap checksums at ${AIRGAP_CHECKSUMS_URL}"
    download "${TMP_AIRGAP_CHECKSUMS}" "${AIRGAP_CHECKSUMS_URL}"
    AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar" "${TMP_AIRGAP_CHECKSUMS}" | awk '{print $1}')
}
# download_airgap_tarball downloads the airgap image tarball.
# Only applies to CI-commit installs (no-op otherwise).
download_airgap_tarball() {
    if [ -z "${INSTALL_RKE2_COMMIT}" ]; then
        return
    fi
    AIRGAP_TARBALL_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.zst
    # try for zst first; if that fails use gz for older release branches
    if ! check_download "${AIRGAP_TARBALL_URL}"; then
        AIRGAP_TARBALL_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz
    fi
    info "downloading airgap tarball at ${AIRGAP_TARBALL_URL}"
    download "${TMP_AIRGAP_TARBALL}" "${AIRGAP_TARBALL_URL}"
}
# verify_airgap_tarball compares the airgap image tarball checksum to the value
# calculated by CI when the file was uploaded.
# No-op when no airgap checksum was staged/downloaded.
verify_airgap_tarball() {
    if [ -z "${AIRGAP_CHECKSUM_EXPECTED}" ]; then
        return
    fi
    info "verifying airgap tarball"
    AIRGAP_CHECKSUM_ACTUAL=$(sha256sum "${TMP_AIRGAP_TARBALL}" | awk '{print $1}')
    if [ "${AIRGAP_CHECKSUM_EXPECTED}" != "${AIRGAP_CHECKSUM_ACTUAL}" ]; then
        fatal "download sha256 does not match ${AIRGAP_CHECKSUM_EXPECTED}, got ${AIRGAP_CHECKSUM_ACTUAL}"
    fi
}
# install_airgap_tarball moves the airgap image tarball into place.
# No-op when no airgap checksum was staged/downloaded.
install_airgap_tarball() {
    if [ -z "${AIRGAP_CHECKSUM_EXPECTED}" ]; then
        return
    fi
    mkdir -p "${INSTALL_RKE2_AGENT_IMAGES_DIR}"
    # releases that provide zst artifacts can read from the compressed archive; older releases
    # that produce only gzip artifacts need to have the tarball decompressed ahead of time
    if grep -qF '.tar.zst' "${TMP_AIRGAP_CHECKSUMS}" || [ "${AIRGAP_TARBALL_FORMAT}" = "zst" ]; then
        info "installing airgap tarball to ${INSTALL_RKE2_AGENT_IMAGES_DIR}"
        mv -f "${TMP_AIRGAP_TARBALL}" "${INSTALL_RKE2_AGENT_IMAGES_DIR}/rke2-images.${SUFFIX}.tar.zst"
    else
        info "decompressing airgap tarball to ${INSTALL_RKE2_AGENT_IMAGES_DIR}"
        gzip -dc "${TMP_AIRGAP_TARBALL}" > "${INSTALL_RKE2_AGENT_IMAGES_DIR}/rke2-images.${SUFFIX}.tar"
    fi
}
# do_install_rpm builds a yum repo config from the channel and version to be installed,
# and calls yum to install the required packages.
# $1 = the RPM channel to use when the install channel is not version-based.
do_install_rpm() {
    maj_ver="7"
    if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ]; then
        dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
        maj_ver=$(echo "$dist_version" | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
        case ${maj_ver} in
        7|8)
            :
            ;;
        *) # In certain cases, like installing on Fedora, maj_ver will end up being something that is not 7 or 8
            maj_ver="7"
            ;;
        esac
    fi
    case "${INSTALL_RKE2_CHANNEL}" in
    v*.*)
        # We are operating with a version-based channel, so we should parse our version out
        rke2_majmin=$(echo "${INSTALL_RKE2_CHANNEL}" | sed -E -e "s/^v([0-9]+\.[0-9]+).*/\1/")
        rke2_rpm_channel=$(echo "${INSTALL_RKE2_CHANNEL}" | sed -E -e "s/^v[0-9]+\.[0-9]+-(.*)/\1/")
        # If our regex fails to capture a "sane" channel out of the specified channel, fall back to `stable`
        if [ "${rke2_rpm_channel}" = ${INSTALL_RKE2_CHANNEL} ]; then
            info "using stable RPM repositories"
            rke2_rpm_channel="stable"
        fi
        ;;
    *)
        get_release_version
        rke2_majmin=$(echo "${INSTALL_RKE2_VERSION}" | sed -E -e "s/^v([0-9]+\.[0-9]+).*/\1/")
        rke2_rpm_channel=${1}
        ;;
    esac
    info "using ${rke2_majmin} series from channel ${rke2_rpm_channel}"
    rpm_site="rpm.rancher.io"
    if [ "${rke2_rpm_channel}" = "testing" ]; then
        rpm_site="rpm-${rke2_rpm_channel}.rancher.io"
    fi
    # replace any repo definition from a previous install before writing ours
    rm -f /etc/yum.repos.d/rancher-rke2*.repo
    cat <<-EOF >"/etc/yum.repos.d/rancher-rke2.repo"
[rancher-rke2-common-${rke2_rpm_channel}]
name=Rancher RKE2 Common (${1})
baseurl=https://${rpm_site}/rke2/${rke2_rpm_channel}/common/centos/${maj_ver}/noarch
enabled=1
gpgcheck=1
gpgkey=https://${rpm_site}/public.key
[rancher-rke2-${rke2_majmin}-${rke2_rpm_channel}]
name=Rancher RKE2 ${rke2_majmin} (${1})
baseurl=https://${rpm_site}/rke2/${rke2_rpm_channel}/${rke2_majmin}/centos/${maj_ver}/x86_64
enabled=1
gpgcheck=1
gpgkey=https://${rpm_site}/public.key
EOF
    if [ -z "${INSTALL_RKE2_VERSION}" ]; then
        yum -y install "rke2-${INSTALL_RKE2_TYPE}"
    else
        # map the git tag to the RPM version scheme (v prefix dropped, +/- -> ~)
        rke2_rpm_version=$(echo "${INSTALL_RKE2_VERSION}" | sed -E -e "s/[\+-]/~/g" | sed -E -e "s/v(.*)/\1/")
        if [ -n "${INSTALL_RKE2_RPM_RELEASE_VERSION}" ]; then
            yum -y install "rke2-${INSTALL_RKE2_TYPE}-${rke2_rpm_version}-${INSTALL_RKE2_RPM_RELEASE_VERSION}"
        else
            yum -y install "rke2-${INSTALL_RKE2_TYPE}-${rke2_rpm_version}"
        fi
    fi
}
# do_install_tar performs a tarball-based install: stages artifacts from a
# local path or downloads them, verifies checksums, installs the airgap
# images (if any), unpacks the tarball, and reloads systemd units.
do_install_tar() {
    setup_tmp
    if [ -n "${INSTALL_RKE2_ARTIFACT_PATH}" ]; then
        stage_local_checksums
        stage_local_airgap_tarball
        stage_local_tarball
    else
        get_release_version
        info "using ${INSTALL_RKE2_VERSION:-commit $INSTALL_RKE2_COMMIT} as release"
        download_airgap_checksums
        download_airgap_tarball
        download_checksums
        download_tarball
    fi
    verify_airgap_tarball
    install_airgap_tarball
    verify_tarball
    unpack_tarball
    systemctl daemon-reload
}
# do_install is the entry point: resolves configuration, guards against
# tar-over-RPM conflicts, then dispatches to the RPM or tar installer.
do_install() {
    setup_env
    check_method_conflict
    setup_arch
    # a downloader is only required when not installing from local artifacts
    if [ -z "${INSTALL_RKE2_ARTIFACT_PATH}" ]; then
        verify_downloader curl || verify_downloader wget || fatal "can not find curl or wget for downloading files"
    fi
    case ${INSTALL_RKE2_METHOD} in
    yum | rpm | dnf)
        do_install_rpm "${INSTALL_RKE2_CHANNEL}"
        ;;
    *)
        do_install_tar "${INSTALL_RKE2_CHANNEL}"
        ;;
    esac
}
do_install
exit 0

View File

@@ -1,173 +0,0 @@
package driver
import (
"bufio"
"context"
_ "embed"
"fmt"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/imdario/mergo"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"k8s.io/apimachinery/pkg/runtime/schema"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sigs.k8s.io/cli-utils/pkg/object"
"sigs.k8s.io/yaml"
)
const (
// base URL for k3s release assets (binary and image list)
k3sReleaseUrl = "https://github.com/k3s-io/k3s/releases/download"
)
// k3sInit holds the bundled k3s install script, embedded at build time.
//go:embed embed/k3s-init.sh
var k3sInit string
// K3s is a cluster driver backed by a k3s release version and its config.
type K3s struct {
Version string
Config K3sConfig
}
//K3sConfig mirrors a subset of k3s CLI flags; it is serialized to the
//k3s config.yaml via the json tags below.
//TODO: Would be nice if these just pointed to k3s/pkg/cli/cmds
type K3sConfig struct {
DataDir string `json:"data-dir,omitempty"`
KubeConfig string `json:"write-kubeconfig,omitempty"`
KubeConfigMode string `json:"write-kubeconfig-mode,omitempty"`
Disable []string `json:"disable,omitempty"`
}
//NewK3s returns a new k3s driver with default paths
//(data dir /var/lib/rancher/k3s, kubeconfig /etc/rancher/k3s/k3s.yaml,
//mode 0644, nothing disabled). Version is left unset by the constructor.
func NewK3s() K3s {
//TODO: Allow for configuration overrides
return K3s{
Config: K3sConfig{
DataDir: "/var/lib/rancher/k3s",
KubeConfig: "/etc/rancher/k3s/k3s.yaml",
KubeConfigMode: "0644",
Disable: []string{},
},
}
}
// Name returns the driver's name ("k3s").
func (k K3s) Name() string { return "k3s" }
// KubeConfigPath returns the path where k3s writes its kubeconfig.
func (k K3s) KubeConfigPath() string { return k.Config.KubeConfig }
// DataPath joins the given path elements onto the driver's data dir.
func (k K3s) DataPath(elem ...string) string {
base := []string{k.Config.DataDir}
return filepath.Join(append(base, elem...)...)
}
//WriteConfig renders the driver config to <kubeconfig dir>/config.yaml,
//merging with any config.yaml already present on disk.
//Fixes two defects in the original: the yaml.Marshal error was never
//checked, and the os.ReadFile result was parsed when the read *failed*
//(err != nil) instead of when it succeeded.
func (k K3s) WriteConfig() error {
	kCfgPath := filepath.Dir(k.Config.KubeConfig)
	if err := os.MkdirAll(kCfgPath, os.ModePerm); err != nil {
		return err
	}
	// round-trip the typed config through yaml into a generic map so it can
	// be merged with arbitrary user-supplied keys
	data, err := yaml.Marshal(k.Config)
	if err != nil {
		return err
	}
	c := make(map[string]interface{})
	if err := yaml.Unmarshal(data, &c); err != nil {
		return err
	}
	var uc map[string]interface{}
	path := filepath.Join(kCfgPath, "config.yaml")
	// only parse the existing file when it was actually read successfully
	if data, err := os.ReadFile(path); err == nil {
		if err := yaml.Unmarshal(data, &uc); err != nil {
			return err
		}
	}
	//Merge with user defined configs taking precedence
	//NOTE(review): mergo.Merge without WithOverride leaves keys already set
	//in c untouched — verify user values actually win as intended
	if err := mergo.Merge(&c, uc); err != nil {
		return err
	}
	mergedData, err := yaml.Marshal(&c)
	if err != nil {
		return err
	}
	return os.WriteFile(path, mergedData, 0644)
}
//Images returns the remote-resolved image map for this k3s version,
//built from the release's k3s-images.txt list.
//NOTE(review): the ctx parameter is currently unused by the resolution call.
func (k K3s) Images(ctx context.Context) (map[name.Reference]v1.Image, error) {
imgs, err := k.listImages()
if err != nil {
return nil, err
}
return images.ResolveRemoteRefs(imgs...)
}
//Binary fetches the k3s executable for the configured version from the
//GitHub release page and returns the response body as a stream; the
//caller is responsible for closing the returned ReadCloser.
//Fixes the original's leak of resp.Body on non-200 responses and its
//conflation of transport errors with bad status codes.
func (k K3s) Binary() (io.ReadCloser, error) {
	u, err := url.Parse(fmt.Sprintf("%s/%s/%s", k3sReleaseUrl, k.Version, k.Name()))
	if err != nil {
		return nil, err
	}
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("failed to return executable for k3s %s from %s: %w", k.Version, u.String(), err)
	}
	if resp.StatusCode != http.StatusOK {
		// close the body so the underlying connection is not leaked
		resp.Body.Close()
		return nil, fmt.Errorf("failed to return executable for k3s %s from %s: status %d", k.Version, u.String(), resp.StatusCode)
	}
	return resp.Body, nil
}
//SystemObjects returns a slice of object.ObjMetadata required for driver to be functional and accept new resources
//hauler's bootstrapping sequence will always wait for SystemObjects to be in a Ready status before proceeding.
//Currently only the kube-system coredns Deployment is tracked; the
//CreateObjMetadata error is intentionally discarded for these fixed inputs.
func (k K3s) SystemObjects() (objs []object.ObjMetadata) {
for _, dep := range []string{"coredns"} {
objMeta, _ := object.CreateObjMetadata("kube-system", dep, schema.GroupKind{Kind: "Deployment", Group: "apps"})
objs = append(objs, objMeta)
}
return objs
}
//Start writes the embedded k3s install script to disk and runs it with
//the install env tuned for an airgapped, pre-downloaded binary.
//Script stdout is streamed to out; stderr is not captured.
func (k K3s) Start(out io.Writer) error {
if err := os.WriteFile("/opt/hauler/bin/k3s-init.sh", []byte(k3sInit), 0755); err != nil {
return err
}
cmd := exec.Command("/bin/sh", "/opt/hauler/bin/k3s-init.sh")
cmd.Env = append(os.Environ(), []string{
"INSTALL_K3S_SKIP_DOWNLOAD=true",
"INSTALL_K3S_SELINUX_WARN=true",
"INSTALL_K3S_SKIP_SELINUX_RPM=true",
"INSTALL_K3S_BIN_DIR=/opt/hauler/bin",
//TODO: Provide a real dryrun option
//"INSTALL_K3S_SKIP_START=true",
}...)
cmd.Stdout = out
return cmd.Run()
}
//listImages fetches the k3s-images.txt manifest for the configured
//version and returns one image reference per line.
//Fixes the original's leak of resp.Body on non-200 responses and its
//silent truncation of the list when the line scanner fails.
func (k K3s) listImages() ([]string, error) {
	u, err := url.Parse(fmt.Sprintf("%s/%s/k3s-images.txt", k3sReleaseUrl, k.Version))
	if err != nil {
		return nil, err
	}
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("failed to return images for k3s %s from %s: %w", k.Version, u.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to return images for k3s %s from %s: status %d", k.Version, u.String(), resp.StatusCode)
	}
	var imgs []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		imgs = append(imgs, scanner.Text())
	}
	// a scanner error would otherwise silently drop trailing images
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return imgs, nil
}

View File

@@ -1,211 +0,0 @@
package fs
import (
"fmt"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/spf13/afero"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"k8s.io/apimachinery/pkg/util/json"
)
// PkgFs is a package filesystem rooted at a directory: all operations go
// through an afero BasePathFs scoped to root.
type PkgFs struct {
FS *afero.BasePathFs
root string
}
// NewPkgFS returns a PkgFs rooted at dir (stored as an absolute path).
// NOTE(review): the filepath.Abs error is discarded, as the TODO admits.
func NewPkgFS(dir string) PkgFs {
var p PkgFs
p.FS = afero.NewBasePathFs(afero.NewOsFs(), dir).(*afero.BasePathFs)
// TODO: absolutely no way this'll bite us in the butt later...
abs, _ := filepath.Abs(dir)
p.root = abs
return p
}
// Path joins the given elements onto the PkgFs absolute root.
func (p PkgFs) Path(elem ...string) string {
complete := []string{p.root}
return filepath.Join(append(complete, elem...)...)
}
// Bundle returns a PkgFs scoped to the bundles subdirectory.
func (p PkgFs) Bundle() PkgFs {
return PkgFs{
FS: afero.NewBasePathFs(p.FS, v1alpha1.BundlesDir).(*afero.BasePathFs),
root: p.Path(v1alpha1.BundlesDir),
}
}
// Image returns a PkgFs scoped to the OCI layout subdirectory.
func (p PkgFs) Image() PkgFs {
return PkgFs{
FS: afero.NewBasePathFs(p.FS, v1alpha1.LayoutDir).(*afero.BasePathFs),
root: p.Path(v1alpha1.LayoutDir),
}
}
// Bin returns a PkgFs scoped to the binaries subdirectory.
func (p PkgFs) Bin() PkgFs {
return PkgFs{
FS: afero.NewBasePathFs(p.FS, v1alpha1.BinDir).(*afero.BasePathFs),
root: p.Path(v1alpha1.BinDir),
}
}
// Chart returns a PkgFs scoped to the charts subdirectory.
func (p PkgFs) Chart() PkgFs {
return PkgFs{
FS: afero.NewBasePathFs(p.FS, v1alpha1.ChartDir).(*afero.BasePathFs),
root: p.Path(v1alpha1.ChartDir),
}
}
//AddBundle will add a bundle to a package and all images that are autodetected from it.
//The bundle is serialized as <name>.json in the bundles dir; the returned
//map contains the image references detected within the bundle.
func (p PkgFs) AddBundle(b *fleetapi.Bundle) (map[name.Reference]v1.Image, error) {
if err := p.mkdirIfNotExists(v1alpha1.BundlesDir, os.ModePerm); err != nil {
return nil, err
}
data, err := json.Marshal(b)
if err != nil {
return nil, err
}
if err := p.Bundle().WriteFile(fmt.Sprintf("%s.json", b.Name), data, 0644); err != nil {
return nil, err
}
imgs, err := images.ImageMapFromBundle(b)
if err != nil {
return nil, err
}
return imgs, nil
}
//AddBin copies r into the package's bin dir under the given name with
//0755 permissions. Fixes the original's file-descriptor leak: the opened
//file was never closed.
func (p PkgFs) AddBin(r io.Reader, name string) error {
	if err := p.mkdirIfNotExists(v1alpha1.BinDir, os.ModePerm); err != nil {
		return err
	}
	f, err := p.Bin().FS.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return err
	}
	_, err = io.Copy(f, r)
	// always close; surface the close error only if the copy succeeded
	if cerr := f.Close(); err == nil {
		err = cerr
	}
	return err
}
//AddImage will add an image to the pkgfs in OCI layout fmt.
//The original reference is recorded in the ref.name annotation so it can
//be recovered later by MapLayout.
//TODO: Extra work is done to ensure this is unique within the index.json
func (p PkgFs) AddImage(ref name.Reference, img v1.Image) error {
if err := p.mkdirIfNotExists(v1alpha1.LayoutDir, os.ModePerm); err != nil {
return err
}
annotations := make(map[string]string)
annotations[ocispec.AnnotationRefName] = ref.Name()
lp, err := p.layout()
if err != nil {
return err
}
//TODO: Change to ReplaceImage
return lp.AppendImage(img, layout.WithAnnotations(annotations))
}
//AddChart downloads the chart at ref/version into the charts dir using
//Helm's downloader, with verification disabled and TLS verification skipped.
//TODO: Not very robust
//For ref: https://github.com/helm/helm/blob/bf486a25cdc12017c7dac74d1582a8a16acd37ea/pkg/action/pull.go#L75
func (p PkgFs) AddChart(ref string, version string) error {
if err := p.mkdirIfNotExists(v1alpha1.ChartDir, os.ModePerm); err != nil {
return err
}
d := downloader.ChartDownloader{
Out: nil,
Verify: downloader.VerifyNever,
Getters: getter.All(cli.New()), // TODO: Probably shouldn't do this...
Options: []getter.Option{
getter.WithInsecureSkipVerifyTLS(true),
},
}
_, _, err := d.DownloadTo(ref, version, p.Chart().Path())
return err
}
// layout opens the package's OCI layout, creating an empty one (with an
// empty image index) on first use.
func (p PkgFs) layout() (layout.Path, error) {
path := p.Image().Path(".")
lp, err := layout.FromPath(path)
if os.IsNotExist(err) {
lp, err = layout.Write(path, empty.Index)
}
return lp, err
}
//WriteFile is a helper method to write a file within the PkgFs.
//The file is created/truncated with the given permissions; a close error
//is reported only when the write itself succeeded.
func (p PkgFs) WriteFile(name string, data []byte, perm os.FileMode) error {
f, err := p.FS.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
_, err = f.Write(data)
if err1 := f.Close(); err1 != nil && err == nil {
err = err1
}
return err
}
//MapLayout walks the OCI layout's index and returns a map from each
//manifest's original reference (stored in the ref.name annotation by
//AddImage) to its image.
//Fixes the original's discarded ImageIndex/IndexManifest errors (which
//risked a nil dereference) and its stale `err` in the success return.
func (p PkgFs) MapLayout() (map[name.Reference]v1.Image, error) {
	imgRefs := make(map[name.Reference]v1.Image)
	//TODO: Factor this out to a Store interface
	lp, err := p.layout()
	if err != nil {
		return nil, err
	}
	ii, err := lp.ImageIndex()
	if err != nil {
		return nil, err
	}
	im, err := ii.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, m := range im.Manifests {
		ref, err := name.ParseReference(m.Annotations[ocispec.AnnotationRefName])
		if err != nil {
			return nil, err
		}
		img, err := lp.Image(m.Digest)
		if err != nil {
			return nil, err
		}
		imgRefs[ref] = img
	}
	return imgRefs, nil
}
//mkdirIfNotExists ensures dir exists within the PkgFs.
//MkdirAll is already a no-op for an existing directory, so the original's
//extra os.Stat (which also raced with the create, and mixed the real OS
//filesystem with the scoped afero FS) is unnecessary — this answers the
//original TODO asking exactly that.
func (p PkgFs) mkdirIfNotExists(dir string, perm os.FileMode) error {
	return p.FS.MkdirAll(dir, perm)
}

View File

@@ -1,39 +0,0 @@
package kube
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"path/filepath"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// NewKubeConfig builds a *rest.Config by trying the k3s kubeconfig path
// first, then the rke2 path, warning if neither exists.
func NewKubeConfig() (*rest.Config, error) {
loadingRules := &clientcmd.ClientConfigLoadingRules{
Precedence: []string{
filepath.Join("/etc/rancher/k3s/k3s.yaml"),
filepath.Join("/etc/rancher/rke2/rke2.yaml"),
},
WarnIfAllMissing: true,
}
cfgOverrides := &clientcmd.ConfigOverrides{}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, cfgOverrides)
return kubeConfig.ClientConfig()
}
//NewClient returns a fresh kube client built from NewKubeConfig.
//NOTE(review): the scheme is created empty (no types registered), so
//typed operations will fail until schemes are added — confirm callers
//only use it with unstructured objects.
func NewClient() (client.Client, error) {
cfg, err := NewKubeConfig()
if err != nil {
return nil, err
}
scheme := runtime.NewScheme()
return client.New(cfg, client.Options{
Scheme: scheme,
})
}

View File

@@ -1,92 +0,0 @@
package kube
import (
"context"
"errors"
"fmt"
"k8s.io/client-go/rest"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/cli-utils/pkg/object"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"strings"
"time"
)
// StatusChecker polls the cluster for resource status until resources
// reach a desired condition or a timeout elapses.
type StatusChecker struct {
	// poller drives the kstatus polling loop
	poller *polling.StatusPoller
	// client is the controller-runtime client the poller was built with
	client client.Client
	// interval is the polling period; timeout bounds the whole wait
	interval time.Duration
	timeout  time.Duration
}
// NewStatusChecker constructs a StatusChecker for the cluster described by
// kubeConfig, polling at interval for at most timeout.
func NewStatusChecker(kubeConfig *rest.Config, interval time.Duration, timeout time.Duration) (*StatusChecker, error) {
	mapper, err := apiutil.NewDynamicRESTMapper(kubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, err := client.New(kubeConfig, client.Options{Mapper: mapper})
	if err != nil {
		return nil, err
	}
	checker := &StatusChecker{
		poller:   polling.NewStatusPoller(kubeClient, mapper),
		client:   kubeClient,
		interval: interval,
		timeout:  timeout,
	}
	return checker, nil
}
// WaitForCondition polls the given objects until they all reach
// status.CurrentStatus, the checker's timeout elapses, or polling fails.
// Per-resource results are printed to stdout once polling stops.
func (c *StatusChecker) WaitForCondition(objs ...object.ObjMetadata) error {
	// cancel is invoked early by the observer once all resources are Current;
	// otherwise the deadline stops the poll.
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	defer cancel()
	eventsChan := c.poller.Poll(ctx, objs, polling.Options{
		PollInterval: c.interval,
		UseCache:     true,
	})
	coll := collector.NewResourceStatusCollector(objs)
	// The observer cancels ctx when the aggregate status reaches CurrentStatus.
	done := coll.ListenWithObserver(eventsChan, desiredStatusNotifierFunc(cancel, status.CurrentStatus))
	// Block until the event channel is drained (poll cancelled or timed out).
	<-done
	for _, rs := range coll.ResourceStatuses {
		switch rs.Status {
		case status.CurrentStatus:
			fmt.Printf("%s: %s ready\n", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind))
		case status.NotFoundStatus:
			fmt.Println(fmt.Errorf("%s: %s not found", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
		default:
			fmt.Println(fmt.Errorf("%s: %s not ready", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
		}
	}
	// DeadlineExceeded here means the wait expired before everything was ready;
	// coll.Error covers poller-level failures.
	if coll.Error != nil || ctx.Err() == context.DeadlineExceeded {
		return errors.New("timed out waiting for condition")
	}
	return nil
}
// desiredStatusNotifierFunc returns an Observer function for the
// ResourceStatusCollector that will cancel the context (using the cancelFunc)
// when all resources have reached the desired status.
func desiredStatusNotifierFunc(cancelFunc context.CancelFunc, desired status.Status) collector.ObserverFunc {
	return func(rsc *collector.ResourceStatusCollector, _ event.Event) {
		var collected []*event.ResourceStatus
		for _, resourceStatus := range rsc.ResourceStatuses {
			collected = append(collected, resourceStatus)
		}
		if aggregator.AggregateStatus(collected, desired) == desired {
			cancelFunc()
		}
	}
}

136
pkg/layout/artifact.go Normal file
View File

@@ -0,0 +1,136 @@
package layout
import (
"bytes"
"encoding/json"
"io"
"os"
"github.com/google/go-containerregistry/pkg/name"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/sync/errgroup"
"github.com/rancherfederal/hauler/pkg/artifact"
)
// Path is a wrapper around layout.Path that adds helpers for writing
// arbitrary artifact.OCI content (not just images) into an OCI layout.
type Path struct {
	layout.Path
}
// FromPath returns the OCI layout rooted at path, initializing an empty
// layout there first when none exists.
func FromPath(path string) (Path, error) {
	lp, err := layout.FromPath(path)
	if os.IsNotExist(err) {
		// No layout yet: seed one with an empty index.
		if lp, err = layout.Write(path, empty.Index); err != nil {
			return Path{}, err
		}
	}
	return Path{Path: lp}, err
}
// WriteOci will write oci content (artifact.OCI) to the given Path.
// It writes all layers, then the config blob, then the manifest blob, and
// finally appends a descriptor (annotated with the reference name) to the
// layout's index. Returns the descriptor of the written manifest.
func (l Path) WriteOci(o artifact.OCI, reference name.Reference) (ocispec.Descriptor, error) {
	layers, err := o.Layers()
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	// Write layers concurrently
	var g errgroup.Group
	for _, layer := range layers {
		layer := layer // capture per-iteration value for the goroutine
		g.Go(func() error {
			return l.writeLayer(layer)
		})
	}
	if err := g.Wait(); err != nil {
		return ocispec.Descriptor{}, err
	}

	// Write the config
	cfgBlob, err := o.RawConfig()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if err = l.writeBlob(cfgBlob); err != nil {
		return ocispec.Descriptor{}, err
	}

	// Serialize and store the manifest; its digest/size are computed from the
	// exact bytes written so the descriptor below stays content-addressable.
	m, err := o.Manifest()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	manifest, err := json.Marshal(m)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := l.writeBlob(manifest); err != nil {
		return ocispec.Descriptor{}, err
	}

	// Record the manifest in the index, keyed by the reference name annotation.
	desc := ocispec.Descriptor{
		MediaType: o.MediaType(),
		Size:      int64(len(manifest)),
		Digest:    digest.FromBytes(manifest),
		Annotations: map[string]string{
			ocispec.AnnotationRefName: reference.Name(),
		},
	}
	if err := l.appendDescriptor(desc); err != nil {
		return ocispec.Descriptor{}, err
	}
	return desc, nil
}
// writeBlob differs from layout.WriteBlob in that it takes raw bytes,
// hashing them itself before storing the blob under its sha256 digest.
func (l Path) writeBlob(data []byte) error {
	hash, _, err := gv1.SHA256(bytes.NewReader(data))
	if err != nil {
		return err
	}
	body := io.NopCloser(bytes.NewReader(data))
	return l.WriteBlob(hash, body)
}
// writeLayer stores a layer's compressed content as a blob keyed by its
// digest (a reimplementation of the unexported layout.writeLayer).
func (l Path) writeLayer(layer gv1.Layer) error {
	dgst, err := layer.Digest()
	if err != nil {
		return err
	}
	rc, err := layer.Compressed()
	if err != nil {
		return err
	}
	return l.WriteBlob(dgst, rc)
}
// appendDescriptor translates an ocispec.Descriptor into its
// go-containerregistry equivalent and appends it to the layout index.
func (l Path) appendDescriptor(desc ocispec.Descriptor) error {
	hash := gv1.Hash{
		Algorithm: desc.Digest.Algorithm().String(),
		Hex:       desc.Digest.Hex(),
	}
	return l.AppendDescriptor(gv1.Descriptor{
		MediaType:   gtypes.MediaType(desc.MediaType),
		Size:        desc.Size,
		Digest:      hash,
		URLs:        desc.URLs,
		Annotations: desc.Annotations,
	})
}

186
pkg/layout/store.go Normal file
View File

@@ -0,0 +1,186 @@
package layout
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/remotes/docker"
"github.com/google/go-containerregistry/pkg/name"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
orascontent "oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/oras"
"github.com/rancherfederal/hauler/pkg/artifact/types"
)
// OCIStore represents a content compatible store adhering by the oci-layout spec
type OCIStore struct {
	content.Store

	// root is the layout directory on disk
	root string
	// index is the decoded index.json; populated by LoadIndex
	index *ocispec.Index
	// digestMap maps a manifest's ref.name annotation to its descriptor
	digestMap map[string]ocispec.Descriptor
}
// Copy placeholder until we migrate to oras 0.5
// Will loop through each appropriately named index and copy the contents to the desired registry.
// Manifests without a ref.name annotation abort the copy with an error.
func Copy(ctx context.Context, s *OCIStore, registry string) error {
	for _, desc := range s.index.Manifests {
		// Read the raw manifest blob from disk to recover its layers/config.
		manifestBlobPath, err := s.blobPath(desc.Digest)
		if err != nil {
			return err
		}
		manifestData, err := os.ReadFile(manifestBlobPath)
		if err != nil {
			return err
		}
		m, mdesc, err := loadManifest(manifestData)
		if err != nil {
			return err
		}

		// The ref.name annotation carries the original image reference.
		refName, ok := desc.Annotations[ocispec.AnnotationRefName]
		if !ok {
			return fmt.Errorf("no name found to push image")
		}
		// Rewrite the reference to point at the target registry.
		rref, err := RelocateReference(refName, registry)
		if err != nil {
			return err
		}

		// NOTE(review): a fresh default resolver per manifest; no auth options
		// are wired in here — confirm whether credentials are needed upstream.
		resolver := docker.NewResolver(docker.ResolverOptions{})
		_, err = oras.Push(ctx, resolver, rref.Name(), s, m.Layers,
			oras.WithConfig(m.Config), oras.WithNameValidation(nil), oras.WithManifest(mdesc))
		if err != nil {
			return err
		}
	}
	return nil
}
// NewOCIStore will return a new OCIStore given a path to an oci-layout
// compatible directory. The layout is validated and its index loaded before
// the store is returned.
//
// Fix: a LoadIndex failure previously returned (nil, nil), handing callers
// a nil store with no error; the error is now propagated.
func NewOCIStore(path string) (*OCIStore, error) {
	fs, err := local.NewStore(path)
	if err != nil {
		return nil, err
	}

	store := &OCIStore{
		Store: fs,
		root:  path,
	}

	if err := store.validateOCILayout(); err != nil {
		return nil, err
	}
	if err := store.LoadIndex(); err != nil {
		return nil, err
	}
	return store, nil
}
// LoadIndex decodes the layout's index.json and rebuilds the digest map
// keyed by each manifest's ref.name annotation.
func (s *OCIStore) LoadIndex() error {
	indexPath := filepath.Join(s.root, types.OCIImageIndexFile)
	f, err := os.Open(indexPath)
	if err != nil {
		// TODO: Don't just bomb out?
		return err
	}
	defer f.Close()

	if err := json.NewDecoder(f).Decode(&s.index); err != nil {
		return err
	}

	s.digestMap = make(map[string]ocispec.Descriptor)
	for _, desc := range s.index.Manifests {
		// refName avoids shadowing the imported "name" package
		refName := desc.Annotations[ocispec.AnnotationRefName]
		if refName != "" {
			s.digestMap[refName] = desc
		}
	}
	return nil
}
// validateOCILayout checks that the store root contains an oci-layout file
// declaring a supported layout version.
func (s *OCIStore) validateOCILayout() error {
	f, err := os.Open(filepath.Join(s.root, ocispec.ImageLayoutFile))
	if err != nil {
		return err
	}
	defer f.Close()

	var lo *ocispec.ImageLayout
	if err := json.NewDecoder(f).Decode(&lo); err != nil {
		return err
	}
	if lo.Version != ocispec.ImageLayoutVersion {
		return orascontent.ErrUnsupportedVersion
	}
	return nil
}
// blobPath maps a digest to its on-disk blob location (blobs/<alg>/<hex>),
// validating the digest first.
func (s *OCIStore) blobPath(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	p := filepath.Join(s.root, "blobs", d.Algorithm().String(), d.Hex())
	return p, nil
}
// manifest is a field wrapper around ocispec.Manifest that contains the
// mediaType field (absent from the embedded spec struct) so it round-trips
// through JSON intact.
type manifest struct {
	ocispec.Manifest `json:",inline"`
	MediaType        string `json:"mediaType"`
}
// loadManifest decodes raw manifest bytes and derives the matching
// descriptor (digest and size computed from the exact serialized form).
func loadManifest(data []byte) (ocispec.Manifest, ocispec.Descriptor, error) {
	var wrapped manifest
	if err := json.Unmarshal(data, &wrapped); err != nil {
		return ocispec.Manifest{}, ocispec.Descriptor{}, err
	}

	desc := ocispec.Descriptor{
		MediaType: wrapped.MediaType,
		Digest:    digest.FromBytes(data),
		Size:      int64(len(data)),
	}
	return wrapped.Manifest, desc, nil
}
// RelocateReference returns a name.Reference given a reference and registry.
// The original registry portion is dropped (only the repository path is
// kept) and the identifier is re-attached as a digest when the original
// reference was digest-form, otherwise as a tag.
func RelocateReference(reference string, registry string) (name.Reference, error) {
	ref, err := name.ParseReference(reference)
	if err != nil {
		return nil, err
	}

	// RepositoryStr() excludes the registry, so this rebuilds the reference
	// against the target registry.
	relocated, err := name.ParseReference(ref.Context().RepositoryStr(), name.WithDefaultRegistry(registry))
	if err != nil {
		return nil, err
	}

	// Preserve digest pinning when the source was referenced by digest.
	if _, err := name.NewDigest(ref.Name()); err == nil {
		return relocated.Context().Digest(ref.Identifier()), nil
	}
	return relocated.Context().Tag(ref.Identifier()), nil
}

View File

@@ -1,73 +1,91 @@
package log
import (
"github.com/pterm/pterm"
"context"
"io"
"os"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// Logger provides an interface for all used logger features regardless of logging backend
// NOTE(review): With returns the unexported *logger, which leaks the concrete
// type through the interface — confirm whether it should return Logger.
type Logger interface {
	SetLevel(string)
	With(Fields) *logger
	WithContext(context.Context) context.Context

	Errorf(string, ...interface{})
	Infof(string, ...interface{})
	Warnf(string, ...interface{})
	Debugf(string, ...interface{})
	Successf(string, ...interface{})
}
type standardLogger struct {
//TODO: Actually check this
level string
// logger is the zerolog-backed implementation of Logger.
type logger struct {
	zl zerolog.Logger
}
// Event pairs a numeric id with a printf-style message template; only used
// by the invalidArgMessage value below.
type Event struct {
	id      int
	message string
}
// Fields defines fields to attach to log msgs
type Fields map[string]string

var (
	// invalidArgMessage is the template used when reporting an invalid argument
	invalidArgMessage = Event{1, "Invalid arg: %s"}
)
// NewLogger returns a zero-valued standardLogger.
// NOTE(review): the out parameter is ignored and no level is set — confirm
// whether this is intentional.
func NewLogger(out io.Writer) *standardLogger {
	return &standardLogger{}
}
// Errorf logs a formatted message at error level via logf.
func (l *standardLogger) Errorf(format string, args ...interface{}) {
	l.logf("error", format, args...)
}
// Infof logs a formatted message at info level via logf.
func (l *standardLogger) Infof(format string, args ...interface{}) {
	l.logf("info", format, args...)
}
// Warnf logs a formatted message at warn level via logf.
func (l *standardLogger) Warnf(format string, args ...interface{}) {
	l.logf("warn", format, args...)
}
// Debugf logs a formatted message at debug level via logf.
func (l *standardLogger) Debugf(format string, args ...interface{}) {
	l.logf("debug", format, args...)
}
// Successf logs a formatted message at the custom success level via logf.
func (l *standardLogger) Successf(format string, args ...interface{}) {
	l.logf("success", format, args...)
}
func (l *standardLogger) logf(level string, format string, args ...interface{}) {
switch level {
case "debug":
pterm.Debug.Printfln(format, args...)
case "info":
pterm.Info.Printfln(format, args...)
case "warn":
pterm.Warning.Printfln(format, args...)
case "success":
pterm.Success.Printfln(format, args...)
case "error":
pterm.Error.Printfln(format, args...)
default:
pterm.Error.Printfln("%s is not a valid log level", level)
// NewLogger returns a new Logger
func NewLogger(out io.Writer) Logger {
l := log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
return &logger{
zl: l.With().Timestamp().Logger(),
}
}
func (l *standardLogger) InvalidArg(arg string) {
l.Errorf(invalidArgMessage.message, arg)
// FromContext returns a Logger from a context if it exists
func FromContext(ctx context.Context) Logger {
zl := zerolog.Ctx(ctx)
return &logger{
zl: *zl,
}
}
// SetLevel sets the global log level. Unparseable level strings fall back
// to "info". Note this mutates zerolog's process-wide level, not just this
// logger instance.
func (l *logger) SetLevel(level string) {
	lvl, err := zerolog.ParseLevel(level)
	if err != nil {
		// invalid input: default to info rather than failing
		lvl, _ = zerolog.ParseLevel("info")
	}
	zerolog.SetGlobalLevel(lvl)
}
// WithContext stores the Logger in the given context and returns it;
// retrieve it later with FromContext.
func (l *logger) WithContext(ctx context.Context) context.Context {
	return l.zl.WithContext(ctx)
}
// With returns a child logger carrying the given Fields as string key/value
// pairs on every message; the receiver is left unchanged.
func (l *logger) With(fields Fields) *logger {
	builder := l.zl.With()
	for key, val := range fields {
		builder = builder.Str(key, val)
	}
	return &logger{zl: builder.Logger()}
}
// Errorf prints a formatted ERR message (fmt.Sprintf-style formatting).
func (l *logger) Errorf(format string, args ...interface{}) {
	l.zl.Error().Msgf(format, args...)
}
// Infof prints a formatted INFO message (fmt.Sprintf-style formatting).
func (l *logger) Infof(format string, args ...interface{}) {
	l.zl.Info().Msgf(format, args...)
}
// Warnf prints a formatted WARN message (fmt.Sprintf-style formatting).
func (l *logger) Warnf(format string, args ...interface{}) {
	l.zl.Warn().Msgf(format, args...)
}
// Debugf prints a formatted DBG message (fmt.Sprintf-style formatting).
func (l *logger) Debugf(format string, args ...interface{}) {
	l.zl.Debug().Msgf(format, args...)
}

View File

@@ -1,40 +0,0 @@
package oci
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
)
const refNameAnnotation = "org.opencontainers.image.ref.name"
// getIndexManifestsDescriptors returns the manifest descriptors from the
// layout's image index.
// NOTE(review): all errors are swallowed and nil returned — callers cannot
// distinguish "empty index" from "corrupt layout"; confirm this is intended.
func getIndexManifestsDescriptors(layout layout.Path) []v1.Descriptor {
	imageIndex, err := layout.ImageIndex()
	if err != nil {
		return nil
	}
	indexManifests, err := imageIndex.IndexManifest()
	if err != nil {
		return nil
	}
	return indexManifests.Manifests
}
// ListDigests returns the digest of every manifest in the layout's index.
func ListDigests(layout layout.Path) []v1.Hash {
	var out []v1.Hash
	for _, desc := range getIndexManifestsDescriptors(layout) {
		out = append(out, desc.Digest)
	}
	return out
}
// ListImages maps each manifest's ref.name annotation to its digest;
// manifests without the annotation are skipped.
func ListImages(layout layout.Path) map[string]v1.Hash {
	images := make(map[string]v1.Hash)
	for _, desc := range getIndexManifestsDescriptors(layout) {
		ref, ok := desc.Annotations[refNameAnnotation]
		if !ok {
			continue
		}
		images[ref] = desc.Digest
	}
	return images
}

View File

@@ -1,74 +0,0 @@
package oci
import (
"fmt"
"github.com/google/go-containerregistry/pkg/v1/empty"
"os"
"testing"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/random"
)
// Test_ListImages writes a random image into a throwaway layout and checks
// that ListImages reports its digest.
//
// Fix: a failure to create the random image previously only did fmt.Printf
// and the test continued with a nil image (confusing downstream failures);
// scaffolding failures now abort with t.Fatalf.
func Test_ListImages(t *testing.T) {
	tmpdir, err := os.MkdirTemp(".", "hauler")
	if err != nil {
		t.Fatalf("failed to setup test scaffolding: %v", err)
	}
	defer os.RemoveAll(tmpdir)

	img, err := random.Image(1024, 5)
	if err != nil {
		t.Fatalf("error creating test image: %v", err)
	}

	ly, err := createLayout(img, tmpdir)
	if err != nil {
		t.Fatalf("%v", err)
	}
	dg, err := getDigest(img)
	if err != nil {
		t.Fatalf("%v", err)
	}

	m := ListImages(ly)
	for _, hash := range m {
		if hash != dg {
			t.Errorf("error got %v want %v", hash, dg)
		}
	}
}
// createLayout initializes (or opens) an OCI layout at path and appends img
// to its index, returning the layout for inspection.
func createLayout(img v1.Image, path string) (layout.Path, error) {
	p, err := layout.FromPath(path)
	if os.IsNotExist(err) {
		p, err = layout.Write(path, empty.Index)
		if err != nil {
			return "", err
		}
	}
	if err != nil {
		return "", fmt.Errorf("error creating layout: %v", err)
	}

	if appendErr := p.AppendImage(img); appendErr != nil {
		return "", appendErr
	}
	return p, nil
}
// getDigest returns img's digest, wrapping any failure with context.
func getDigest(img v1.Image) (v1.Hash, error) {
	dgst, err := img.Digest()
	if err != nil {
		return v1.Hash{}, fmt.Errorf("error getting digest: %v", err)
	}
	return dgst, nil
}

View File

@@ -1,79 +0,0 @@
package oci
import (
"context"
"fmt"
"os"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
haulerMediaType = "application/vnd.oci.image"
)
// Get wraps the oras go module to get artifacts from a registry
// and save them under dst, restricting pulls to hauler's media type.
func Get(ctx context.Context, src string, dst string) error {
	store := content.NewFileStore(dst)
	defer store.Close()

	res, err := resolver()
	if err != nil {
		return err
	}

	// Pull file(s) from registry and save to disk
	fmt.Printf("pulling from %s and saving to %s\n", src, dst)
	allowed := oras.WithAllowedMediaTypes([]string{haulerMediaType})
	desc, _, err := oras.Pull(ctx, res, src, store, allowed)
	if err != nil {
		return err
	}
	fmt.Printf("pulled from %s with digest %s\n", src, desc.Digest)
	return nil
}
// Put wraps the oras go module to put artifacts into a registry,
// reading src from disk and pushing it as a single-descriptor artifact.
func Put(ctx context.Context, src string, dst string) error {
	data, err := os.ReadFile(src)
	if err != nil {
		return err
	}

	res, err := resolver()
	if err != nil {
		return err
	}

	memStore := content.NewMemoryStore()
	descs := []ocispec.Descriptor{memStore.Add(src, haulerMediaType, data)}

	desc, err := oras.Push(ctx, res, dst, memStore, descs)
	if err != nil {
		return err
	}
	fmt.Printf("pushed %s to %s with digest: %s", src, dst, desc.Digest)
	return nil
}
// resolver returns a plain-HTTP docker resolver; the error return is always
// nil and exists only to match caller expectations.
func resolver() (remotes.Resolver, error) {
	resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
	return resolver, nil
}

View File

@@ -1,59 +0,0 @@
package oci
import (
"context"
"fmt"
"io/ioutil"
"net/http/httptest"
"net/url"
"os"
"testing"
"time"
"github.com/google/go-containerregistry/pkg/registry"
)
const timeout = 1 * time.Minute
// Test_Get_Put round-trips an artifact through an in-memory registry:
// Put pushes a temp file, Get pulls it back into a temp dir.
//
// Fixes: cleanup defers were registered at the end of the test, so any
// earlier t.Fatal leaked the temp file/dir; they are now registered right
// after creation. The temp file is also closed before pushing.
func Test_Get_Put(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// Set up a fake registry.
	s := httptest.NewServer(registry.New())
	defer s.Close()
	u, err := url.Parse(s.URL)
	if err != nil {
		t.Fatal(err)
	}

	file, err := ioutil.TempFile(".", "artifact.txt")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(file.Name())

	text := []byte("Some stuff!")
	if _, err = file.Write(text); err != nil {
		t.Fatal(err)
	}
	// Flush the file before Put reads it back from disk.
	if err := file.Close(); err != nil {
		t.Fatal(err)
	}

	img := fmt.Sprintf("%s/artifact:latest", u.Host)
	if err := Put(ctx, file.Name(), img); err != nil {
		t.Fatal(err)
	}

	dir, err := ioutil.TempDir(".", "tmp")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	if err := Get(ctx, img, dir); err != nil {
		t.Fatal(err)
	}
}

View File

@@ -1,48 +0,0 @@
package packager
import (
"fmt"
"os"
"path/filepath"
"github.com/mholt/archiver/v3"
)
// Archiver abstracts archive creation/extraction; String() reports the
// archive's file extension (used by Package to build the output filename).
type Archiver interface {
	String() string
	Archive([]string, string) error
	Unarchive(string, string) error
}
// NewArchiver returns a zstd-compressed tar Archiver configured to
// overwrite existing files, create parent dirs, and fail fast on errors.
func NewArchiver() Archiver {
	tarCfg := &archiver.Tar{
		OverwriteExisting:      true,
		MkdirAll:               true,
		ImplicitTopLevelFolder: false,
		StripComponents:        0,
		ContinueOnError:        false,
	}
	return &archiver.TarZstd{Tar: tarCfg}
}
// Package archives the contents of src into <cwd>/<output>.<ext>, where ext
// comes from the Archiver. It chdirs into src so archive entries are
// relative ("."), restoring the original working directory on return.
// NOTE(review): the deferred os.Chdir error is ignored, and chdir makes this
// unsafe for concurrent use within one process — confirm callers are serial.
func Package(a Archiver, src string, output string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer os.Chdir(cwd)

	err = os.Chdir(src)
	if err != nil {
		return err
	}

	// Output path is resolved against the original cwd, not src.
	path := filepath.Join(cwd, fmt.Sprintf("%s.%s", output, a.String()))
	return a.Archive([]string{"."}, path)
}
// Unpackage extracts the archive at src into dest using the given Archiver.
func Unpackage(a Archiver, src, dest string) error {
	return a.Unarchive(src, dest)
}

View File

@@ -1,164 +0,0 @@
package images
import (
"bytes"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/helmdeployer"
"github.com/rancher/fleet/pkg/manifest"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/util/jsonpath"
"strings"
)
// Imager is anything that can report a list of image references to package.
type Imager interface {
	Images() ([]string, error)
}
// discoveredImages is a plain slice of image refs satisfying Imager.
type discoveredImages []string

// Images returns the discovered references as-is; the error is always nil.
func (d discoveredImages) Images() ([]string, error) {
	return d, nil
}
//MapImager gathers images from each Imager source, resolves them remotely,
//and merges everything into a single reference→image map (later sources win
//on duplicate references).
func MapImager(imager ...Imager) (map[name.Reference]v1.Image, error) {
	merged := make(map[name.Reference]v1.Image)
	for _, src := range imager {
		refs, err := src.Images()
		if err != nil {
			return nil, err
		}

		resolved, err := ResolveRemoteRefs(refs...)
		if err != nil {
			return nil, err
		}

		//TODO: Is there a more efficient way to merge?
		for ref, img := range resolved {
			merged[ref] = img
		}
	}
	return merged, nil
}
// ImageMapFromBundle renders a fleet Bundle (via helm templating), scans the
// resulting objects for container images, and resolves them remotely.
func ImageMapFromBundle(b *fleetapi.Bundle) (map[name.Reference]v1.Image, error) {
	opts := fleetapi.BundleDeploymentOptions{}

	//TODO: Why doesn't fleet do this...
	// Copy only the deployment option groups the bundle actually sets.
	if b.Spec.Helm != nil {
		opts.Helm = b.Spec.Helm
	}
	if b.Spec.Kustomize != nil {
		opts.Kustomize = b.Spec.Kustomize
	}
	if b.Spec.YAML != nil {
		opts.YAML = b.Spec.YAML
	}

	m, err := manifest.New(&b.Spec)
	if err != nil {
		return nil, err
	}

	//TODO: I think this is right?
	objs, err := helmdeployer.Template(b.Name, m, opts)
	if err != nil {
		return nil, err
	}

	// Collect image refs from every rendered object; the type assertion
	// assumes Template returns *unstructured.Unstructured objects.
	var di discoveredImages
	for _, o := range objs {
		imgs, err := imageFromRuntimeObject(o.(*unstructured.Unstructured))
		if err != nil {
			return nil, err
		}
		di = append(di, imgs...)
	}
	return ResolveRemoteRefs(di...)
}
//ResolveRemoteRefs resolves each fully qualified name to its remote image
//using the default keychain, skipping empty strings, and returns the
//reference→image map.
func ResolveRemoteRefs(images ...string) (map[name.Reference]v1.Image, error) {
	resolved := make(map[name.Reference]v1.Image)
	for _, image := range images {
		if image == "" {
			continue
		}

		//TODO: This will error out if remote is a v1 image, do better error handling for this
		ref, err := name.ParseReference(image)
		if err != nil {
			return nil, err
		}
		img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		if err != nil {
			return nil, err
		}
		resolved[ref] = img
	}
	return resolved, nil
}
//TODO: Add user defined paths
// knownImagePaths are the jsonpath templates scanned by
// imageFromRuntimeObject to find container images in workload specs.
var knownImagePaths = []string{
	// Deployments & DaemonSets
	"{.spec.template.spec.initContainers[*].image}",
	"{.spec.template.spec.containers[*].image}",

	// Pods
	"{.spec.initContainers[*].image}",
	"{.spec.containers[*].image}",
}
//imageFromRuntimeObject will return any images found in known obj specs by
//evaluating each knownImagePaths jsonpath template against the object.
//
//Fix: the error from obj.MarshalJSON() was previously discarded; a marshal
//failure is now returned instead of feeding empty bytes to json.Unmarshal.
func imageFromRuntimeObject(obj *unstructured.Unstructured) (images []string, err error) {
	objData, err := obj.MarshalJSON()
	if err != nil {
		return nil, err
	}

	var data interface{}
	if err := json.Unmarshal(objData, &data); err != nil {
		return nil, err
	}

	j := jsonpath.New("")
	// Missing keys are fine: most objects only match a subset of the paths.
	j.AllowMissingKeys(true)

	for _, path := range knownImagePaths {
		r, err := parseJSONPath(data, j, path)
		if err != nil {
			return nil, err
		}
		images = append(images, r...)
	}
	return images, nil
}
// parseJSONPath evaluates template against input using parser and splits
// the rendered output on spaces into individual results.
func parseJSONPath(input interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
	if err := parser.Parse(template); err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := parser.Execute(&buf, input); err != nil {
		return nil, err
	}
	isSpace := func(r rune) bool { return r == ' ' }
	return strings.FieldsFunc(buf.String(), isSpace), nil
}

View File

@@ -1,84 +0,0 @@
package images
import (
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/util/jsonpath"
"reflect"
"testing"
)
var (
	// jsona is the static test document covering a flat key, a nested key,
	// and a nested list of images.
	jsona = []byte(`{
		"flatImage": "name/of/image:with-tag",
		"deeply": {
			"nested": {
				"image": "another/image/name:with-a-tag",
				"set": [
					{ "image": "first/in/list:123" },
					{ "image": "second/in:456" }
				]
			}
		}
	}`)
)
// Test_parseJSONPath verifies flat, nested, and slice jsonpath extraction
// against the static jsona document.
//
// Fixes: a failed unmarshal of the fixture previously used t.Errorf and let
// the test continue with nil data; it now aborts with t.Fatalf. The unused
// args.name field (always "wut", never read) is removed.
func Test_parseJSONPath(t *testing.T) {
	var data interface{}
	if err := json.Unmarshal(jsona, &data); err != nil {
		t.Fatalf("failed to unmarshal test article, %v", err)
	}
	j := jsonpath.New("")

	type args struct {
		input    interface{}
		template string
	}
	tests := []struct {
		name    string
		args    args
		want    []string
		wantErr bool
	}{
		{
			name: "should find flat path with string result",
			args: args{input: data, template: "{.flatImage}"},
			want: []string{"name/of/image:with-tag"},
		},
		{
			name: "should find nested path with string result",
			args: args{input: data, template: "{.deeply.nested.image}"},
			want: []string{"another/image/name:with-a-tag"},
		},
		{
			name: "should find nested path with slice result",
			args: args{input: data, template: "{.deeply.nested.set[*].image}"},
			want: []string{"first/in/list:123", "second/in:456"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseJSONPath(tt.args.input, j, tt.args.template)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseJSONPath() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("parseJSONPath() got = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@@ -1,177 +0,0 @@
package packager
import (
"context"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/bundle"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/fs"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/packager/images"
"k8s.io/apimachinery/pkg/util/json"
"path/filepath"
)
// Packager collects content (bundles, driver binaries, fleet charts, images)
// into an on-disk package filesystem and archives it.
type Packager interface {
	Archive(Archiver, v1alpha1.Package, string) error
	PackageBundles(context.Context, ...string) ([]*fleetapi.Bundle, error)
	PackageDriver(context.Context, driver.Driver) error
	PackageFleet(context.Context, v1alpha1.Fleet) error
	PackageImages(context.Context, ...string) error
}
// pkg is the default Packager implementation backed by a PkgFs working dir.
type pkg struct {
	// fs is the package filesystem everything is written into
	fs fs.PkgFs
	// logger receives progress/success messages
	logger log.Logger
}
//NewPackager loads a new packager rooted at path, logging through logger.
func NewPackager(path string, logger log.Logger) Packager {
	pfs := fs.NewPkgFS(path)
	return pkg{fs: pfs, logger: logger}
}
// Archive serializes the package manifest to package.json inside the
// package fs, then archives the whole fs to <output>.<ext>.
func (p pkg) Archive(a Archiver, pkg v1alpha1.Package, output string) error {
	manifestBytes, err := json.Marshal(pkg)
	if err != nil {
		return err
	}
	if err := p.fs.WriteFile("package.json", manifestBytes, 0644); err != nil {
		return err
	}
	return Package(a, p.fs.Path(), output)
}
// PackageBundles opens each path as a fleet bundle, writes it into the
// package fs, auto-detects and packages the images it references, and
// returns the created bundles.
func (p pkg) PackageBundles(ctx context.Context, path ...string) ([]*fleetapi.Bundle, error) {
	p.logger.Infof("Packaging %d bundle(s)", len(path))
	opts := &bundle.Options{
		Compress: true,
	}

	// cImgs counts auto-detected images across all bundles for the summary log.
	var cImgs int
	var bundles []*fleetapi.Bundle
	for _, pth := range path {
		p.logger.Infof("Creating bundle from path: %s", pth)
		// The bundle is named after its directory's base name.
		bundleName := filepath.Base(pth)
		fb, err := bundle.Open(ctx, bundleName, pth, "", opts)
		if err != nil {
			return nil, err
		}

		//TODO: Figure out why bundle.Open doesn't return with GVK
		bn := fleetapi.NewBundle("fleet-local", bundleName, *fb.Definition)

		// AddBundle writes the bundle and reports the images it references.
		imgs, err := p.fs.AddBundle(bn)
		if err != nil {
			return nil, err
		}

		if err := p.pkgImages(ctx, imgs); err != nil {
			return nil, err
		}

		bundles = append(bundles, bn)
		cImgs += len(imgs)
	}

	p.logger.Successf("Finished packaging %d bundle(s) along with %d autodetected image(s)", len(path), cImgs)
	return bundles, nil
}
// PackageDriver adds a driver's executable and its required images to the
// package.
//
// Fix: the binary ReadCloser was only closed on the success path; if AddBin
// failed it leaked. Closing is now deferred immediately after acquisition.
func (p pkg) PackageDriver(ctx context.Context, d driver.Driver) error {
	p.logger.Infof("Packaging %s components", d.Name())

	p.logger.Infof("Adding %s executable to package", d.Name())
	rc, err := d.Binary()
	if err != nil {
		return err
	}
	defer rc.Close()

	if err := p.fs.AddBin(rc, d.Name()); err != nil {
		return err
	}

	p.logger.Infof("Adding required images for %s to package", d.Name())
	imgMap, err := d.Images(ctx)
	if err != nil {
		return err
	}

	if err := p.pkgImages(ctx, imgMap); err != nil {
		return err
	}

	p.logger.Successf("Finished packaging %s components", d.Name())
	return nil
}
// PackageImages resolves the user-supplied references remotely and writes
// each resulting image into the package.
func (p pkg) PackageImages(ctx context.Context, imgs ...string) error {
	p.logger.Infof("Packaging %d user defined images", len(imgs))

	resolved, err := images.ResolveRemoteRefs(imgs...)
	if err != nil {
		return err
	}
	if err := p.pkgImages(ctx, resolved); err != nil {
		return err
	}

	p.logger.Successf("Finished packaging %d user defined images", len(imgs))
	return nil
}
//TODO: Add this to PackageDriver?
// PackageFleet adds fleet's images plus its CRD chart and main chart to the
// package.
func (p pkg) PackageFleet(ctx context.Context, fl v1alpha1.Fleet) error {
	p.logger.Infof("Packaging fleet components")

	// fl acts as an Imager here, supplying fleet's required image refs.
	imgMap, err := images.MapImager(fl)
	if err != nil {
		return err
	}
	if err := p.pkgImages(ctx, imgMap); err != nil {
		return err
	}

	p.logger.Infof("Adding fleet crds to package")
	if err := p.fs.AddChart(fl.CRDChart(), fl.Version); err != nil {
		return err
	}

	p.logger.Infof("Adding fleet to package")
	if err := p.fs.AddChart(fl.Chart(), fl.Version); err != nil {
		return err
	}

	p.logger.Successf("Finished packaging fleet components")
	return nil
}
//pkgImages writes every image in imgMap into the package layout, logging
//1-based progress as it goes.
func (p pkg) pkgImages(ctx context.Context, imgMap map[name.Reference]v1.Image) error {
	total := len(imgMap)
	count := 0
	for ref, img := range imgMap {
		count++
		p.logger.Infof("Packaging image (%d/%d): %s", count, total, ref.Name())
		if err := p.fs.AddImage(ref, img); err != nil {
			return err
		}
	}
	return nil
}

148
pkg/store/add.go Normal file
View File

@@ -0,0 +1,148 @@
package store
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rancherfederal/hauler/pkg/artifact"
"github.com/rancherfederal/hauler/pkg/cache"
"github.com/rancherfederal/hauler/pkg/layout"
"github.com/rancherfederal/hauler/pkg/log"
)
// AddArtifact will add an artifact.OCI to the store
// The method to achieve this is to save artifact.OCI to a temporary directory in an OCI layout compatible form. Once
// saved, the entirety of the layout is copied to the store (which is just a registry). This allows us to not only use
// strict types to define generic content, but provides a processing pipeline suitable for extensibility. In the
// future we'll allow users to define their own content that must adhere either by artifact.OCI or simply an OCI layout.
func (s *Store) AddArtifact(ctx context.Context, oci artifact.OCI, reference name.Reference) (ocispec.Descriptor, error) {
	lgr := log.FromContext(ctx)

	// precheck validates the store is usable before staging anything.
	if err := s.precheck(); err != nil {
		return ocispec.Descriptor{}, err
	}

	stg, err := newOciStage()
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	// When a cache is configured, wrap the content so blobs are cached on write.
	if s.cache != nil {
		cached := cache.Oci(oci, s.cache)
		oci = cached
	}

	lgr.Debugf("staging %s", reference.Name())
	if err := stg.add(ctx, oci, reference); err != nil {
		return ocispec.Descriptor{}, err
	}

	// commit copies the staged layout into the store's registry.
	return stg.commit(ctx, s)
}
// Flush is a fancy name for delete-all-the-things, in this case it's as trivial as deleting everything in the underlying store directory
// This can be a highly destructive operation if the store's directory happens to be inline with other non-store contents
// To reduce the blast radius and likelihood of deleting things we don't own, Flush explicitly includes docker/registry/v2
// in the search dir
func (s *Store) Flush(ctx context.Context) error {
	contentDir := filepath.Join(s.DataDir, "docker", "registry", "v2")

	entries, err := ioutil.ReadDir(contentDir)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	// A missing dir means there's nothing to flush (entries is empty).
	for _, entry := range entries {
		if err := os.RemoveAll(filepath.Join(contentDir, entry.Name())); err != nil {
			return err
		}
	}
	return nil
}
// AddCollection adds every item of a collection (a named set of artifact.OCI
// contents) to the store and returns the descriptors produced.
//
// Fixes: an AddArtifact failure previously returned (nil, nil), silently
// dropping the error; the per-item descriptors were never collected, so the
// function always returned nil descriptors. Both are corrected.
func (s *Store) AddCollection(ctx context.Context, coll artifact.Collection) ([]ocispec.Descriptor, error) {
	if err := s.precheck(); err != nil {
		return nil, err
	}

	cnts, err := coll.Contents()
	if err != nil {
		return nil, err
	}

	var descs []ocispec.Descriptor
	for ref, o := range cnts {
		desc, err := s.AddArtifact(ctx, o, ref)
		if err != nil {
			return nil, err
		}
		descs = append(descs, desc)
	}
	return descs, nil
}
// stager describes a two-phase pipeline: add content to a staging area,
// then commit the stage into a Store.
// NOTE(review): the oci type's methods take extra ctx/reference parameters
// and return a descriptor, so it does NOT satisfy this interface as written
// — confirm whether the interface should be updated or removed.
type stager interface {
	// add adds an artifact.OCI to the stage
	add(artifact.OCI) error

	// commit pushes all the staged contents into the store and closes the stage
	commit(*Store) error

	// close flushes and closes the stage
	close() error
}
// oci is a temporary OCI-layout staging area rooted at a throwaway dir.
type oci struct {
	// layout is the staging OCI layout content is written into
	layout layout.Path
	// root is the temp directory backing the layout; removed by close()
	root string
}
// add stages an artifact.OCI into the staging layout under reference.
func (o *oci) add(ctx context.Context, oci artifact.OCI, reference name.Reference) error {
	_, err := o.layout.WriteOci(oci, reference)
	return err
}
// commit copies the staged layout into the store's registry and always
// disposes of the stage afterwards.
//
// Fix: the close was deferred after the NewOCIStore error check, so a
// failure there leaked the staging temp dir; close now always runs.
func (o *oci) commit(ctx context.Context, s *Store) (ocispec.Descriptor, error) {
	defer o.close()

	ts, err := layout.NewOCIStore(o.root)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	err = layout.Copy(ctx, ts, s.Registry())
	return ocispec.Descriptor{}, err
}
// close deletes the staging directory and everything in it.
func (o *oci) close() error {
	return os.RemoveAll(o.root)
}
// newOciStage creates a temporary OCI layout used to stage content before
// it is committed to the store.
//
// Fix: if layout.FromPath failed, the freshly created temp dir leaked; it
// is now removed on that error path.
func newOciStage() (*oci, error) {
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return nil, err
	}

	l, err := layout.FromPath(tmpdir)
	if err != nil {
		os.RemoveAll(tmpdir)
		return nil, err
	}

	return &oci{
		layout: l,
		root:   tmpdir,
	}, nil
}

20
pkg/store/options.go Normal file
View File

@@ -0,0 +1,20 @@
package store
import "github.com/rancherfederal/hauler/pkg/cache"
// Options is a functional option that mutates a Store during construction
// (applied by NewStore in order).
type Options func(*Store)
// WithCache configures the Store with a cache.Cache; all content added to
// the Store will first be cached.
func WithCache(c cache.Cache) Options {
	return func(store *Store) {
		store.cache = c
	}
}
// WithDefaultRepository sets the repository used when a reference does not
// specify one (defaults to "library").
func WithDefaultRepository(repo string) Options {
	return func(store *Store) {
		store.DefaultRepository = repo
	}
}

214
pkg/store/store.go Normal file
View File

@@ -0,0 +1,214 @@
package store
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"regexp"
"strconv"
"time"
"github.com/distribution/distribution/v3/configuration"
dcontext "github.com/distribution/distribution/v3/context"
"github.com/distribution/distribution/v3/reference"
"github.com/distribution/distribution/v3/registry/client"
"github.com/distribution/distribution/v3/registry/handlers"
"github.com/google/go-containerregistry/pkg/name"
"github.com/sirupsen/logrus"
// Init filesystem distribution storage driver
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
"github.com/rancherfederal/hauler/pkg/cache"
)
var (
	// httpRegex matches an "http://" or "https://" scheme prefix; used by
	// Registry() to strip the protocol from the test server's URL.
	httpRegex = regexp.MustCompile("https?://")
)
// Store is a simple wrapper around distribution/distribution to enable hauler's use case
type Store struct {
	// DataDir is the root directory backing the registry's filesystem storage.
	DataDir string
	// DefaultRepository is the repository used when none is specified.
	DefaultRepository string

	config  *configuration.Configuration
	handler http.Handler
	// server is the in-process registry server; nil until Open is called.
	server *httptest.Server
	// cache, when set via WithCache, fronts content added to the store.
	cache cache.Cache
}
// NewStore creates a new registry store, designed strictly for use within
// hauler's embedded operations and _not_ for serving.
func NewStore(ctx context.Context, dataDir string, opts ...Options) *Store {
	cfg := &configuration.Configuration{
		Version: "0.1",
		Storage: configuration.Storage{
			"cache":      configuration.Parameters{"blobdescriptor": "inmemory"},
			"filesystem": configuration.Parameters{"rootdirectory": dataDir},
		},
	}
	// Silence the embedded registry's own logging; hauler logs separately.
	cfg.Log.Level = "panic"
	cfg.HTTP.Headers = http.Header{"X-Content-Type-Options": []string{"nosniff"}}

	handler := setupHandler(ctx, cfg)

	s := &Store{
		DataDir: dataDir,
		// Bug fix: WithDefaultRepository documents "library" as the default,
		// but nothing set it; establish the default before applying options.
		DefaultRepository: "library",
		config:            cfg,
		handler:           handler,
	}

	for _, opt := range opts {
		opt(s)
	}
	return s
}
// Open will create a new server and start it, it's up to the consumer to close it
func (s *Store) Open() *httptest.Server {
	s.server = httptest.NewServer(s.handler)
	return s.server
}
// Close stops the embedded registry server. Safe to call when no server is
// running (Close before Open, or a double Close).
func (s *Store) Close() {
	// Bug fix: previously panicked on a nil server when Close was called
	// before Open; also drops the redundant naked return.
	if s.server != nil {
		s.server.Close()
		s.server = nil
	}
}
// List will list all known content tags in the registry
// TODO: This fn is messy and needs cleanup, this is arguably easier with the catalog api as well
func (s *Store) List(ctx context.Context) ([]string, error) {
	reg, err := client.NewRegistry(s.RegistryURL(), nil)
	if err != nil {
		return nil, err
	}

	// Page through the repository catalog, recording every repository name.
	entries := make(map[string]reference.Named)
	last := ""
	for {
		chunk := make([]string, 20) // randomly chosen page size...
		nf, rerr := reg.Repositories(ctx, chunk, last)
		for _, e := range chunk[:nf] {
			if e == "" {
				continue
			}
			named, err := reference.WithName(e)
			if err != nil {
				return nil, err
			}
			entries[e] = named
		}
		if rerr == io.EOF {
			break
		}
		if rerr != nil {
			// Bug fix: a non-EOF error previously left this loop spinning
			// forever; surface it instead.
			return nil, rerr
		}
		// Bug fix: pagination resumes from the last repository *name*, not
		// from strconv.Itoa(count) as before.
		if nf > 0 {
			last = chunk[nf-1]
		}
	}

	// Resolve every repository's tags into fully-qualified references.
	var refs []string
	for repoName, named := range entries {
		repo, err := client.NewRepository(named, s.RegistryURL(), nil)
		if err != nil {
			return nil, err
		}
		ts, err := repo.Tags(ctx).All(ctx)
		if err != nil {
			return nil, err
		}
		for _, t := range ts {
			ref, err := name.ParseReference(repoName, name.WithDefaultRegistry(""), name.WithDefaultTag(t))
			if err != nil {
				return nil, err
			}
			refs = append(refs, ref.Name())
		}
	}
	return refs, nil
}
// precheck checks whether server is appropriately started and errors if it's not
// used to safely run Store operations without fear of panics
func (s *Store) precheck() error {
	if s.server != nil && s.server.URL != "" {
		return nil
	}
	return fmt.Errorf("server is not started yet")
}
// Registry returns the registry's URL without the protocol, suitable for image relocation operations
// NOTE(review): panics if called before Open (s.server is nil) — confirm all
// callers go through precheck first.
func (s *Store) Registry() string {
	return httpRegex.ReplaceAllString(s.server.URL, "")
}
// RegistryURL returns the registry's URL, including the protocol.
// NOTE(review): panics if called before Open (s.server is nil).
func (s *Store) RegistryURL() string {
	return s.server.URL
}
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
// setupHandler will set up the registry handler
// It configures logging from the registry config, builds the distribution
// registry app, registers its health checks, and wraps it with the alive
// probe at "/".
//
// NOTE(review): the CancelFunc from configureLogging is discarded here, so
// the derived context is never cancelled — confirm this is intentional.
func setupHandler(ctx context.Context, config *configuration.Configuration) http.Handler {
	ctx, _ = configureLogging(ctx, config)
	app := handlers.NewApp(ctx, config)
	app.RegisterHealthChecks()
	handler := alive("/", app)
	return handler
}
// configureLogging applies the registry config's log level, formatter, and
// structured fields to logrus and the distribution context logger, returning
// a cancellable context derived from ctx.
func configureLogging(ctx context.Context, cfg *configuration.Configuration) (context.Context, context.CancelFunc) {
	logrus.SetLevel(logLevel(cfg.Log.Level))

	formatter := cfg.Log.Formatter
	if formatter == "" {
		formatter = "text"
	}
	// Bug fix: the formatter setting was previously computed and then
	// ignored (TextFormatter was always installed); honor it here.
	switch formatter {
	case "json":
		logrus.SetFormatter(&logrus.JSONFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	default:
		logrus.SetFormatter(&logrus.TextFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	}

	if len(cfg.Log.Fields) > 0 {
		var fields []interface{}
		for k := range cfg.Log.Fields {
			fields = append(fields, k)
		}
		ctx = dcontext.WithValues(ctx, cfg.Log.Fields)
		ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, fields...))
	}
	dcontext.SetDefaultLogger(dcontext.GetLogger(ctx))
	return context.WithCancel(ctx)
}
// logLevel parses a distribution log level into a logrus level, warning and
// falling back to Info when the value can't be parsed.
func logLevel(level configuration.Loglevel) logrus.Level {
	parsed, err := logrus.ParseLevel(string(level))
	if err == nil {
		return parsed
	}
	logrus.Warnf("error parsing log level %q: %v, using %q", level, err, logrus.InfoLevel)
	return logrus.InfoLevel
}

87
pkg/store/store_test.go Normal file
View File

@@ -0,0 +1,87 @@
package store
import (
"context"
"os"
"testing"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/remote"
)
// TestStore_List spins up an ephemeral store, pushes one random image under
// four distinct references (across three repositories), and expects List to
// return exactly one fully-qualified ref per pushed tag.
func TestStore_List(t *testing.T) {
	ctx := context.Background()
	s, err := testStore(ctx)
	if err != nil {
		t.Fatal(err)
	}
	s.Open()
	defer s.Close()
	// The same image content is fine; only the references need to differ.
	r := randomImage(t)
	addImageToStore(t, s, r, "hauler/tester:latest")
	addImageToStore(t, s, r, "hauler/tester:non")
	addImageToStore(t, s, r, "other/ns:more")
	addImageToStore(t, s, r, "unique/donkey:v1.2.2")
	type args struct {
		ctx context.Context
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name:    "should list",
			args:    args{},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			refs, err := s.List(ctx)
			if (err != nil) != tt.wantErr {
				t.Errorf("List() error = %v, wantErr %v", err, tt.wantErr)
			}
			// TODO: Make this more robust
			if len(refs) != 4 {
				t.Errorf("Expected 4, got %d", len(refs))
			}
		})
	}
}
// testStore builds a Store rooted at a fresh temporary directory for tests.
func testStore(ctx context.Context) (*Store, error) {
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return nil, err
	}
	return NewStore(ctx, tmpdir), nil
}
// randomImage returns a small randomly generated image (3 layers of 1024
// bytes), failing the test immediately on error.
func randomImage(t *testing.T) v1.Image {
	img, err := random.Image(1024, 3)
	if err != nil {
		t.Fatalf("random.Image() = %v", err)
	}
	return img
}
// addImageToStore pushes image into the store's registry under reference.
func addImageToStore(t *testing.T, s *Store, image v1.Image, reference string) {
	t.Helper()
	ref, err := name.ParseReference(reference, name.WithDefaultRegistry(s.Registry()))
	if err != nil {
		// Bug fix: t.Error previously let the test continue and call
		// remote.Write with an invalid ref; fail fast instead.
		t.Fatal(err)
	}
	if err := remote.Write(ref, image); err != nil {
		t.Error(err)
	}
}

View File

@@ -1,92 +0,0 @@
package util
import (
"bufio"
"fmt"
"github.com/mholt/archiver/v3"
"io"
"os"
"path/filepath"
)
// dir describes one directory within a layout: its path relative to the
// layout root and the permission bits it is created with.
type dir struct {
	Path       string
	Permission os.FileMode
}

// FSLayout is a set of directories rooted at Root that can be created,
// archived, and removed as a unit.
type FSLayout struct {
	Root string
	dirs []dir
}
// Layout is the contract for a creatable/archivable/removable directory layout.
//
// NOTE(review): FSLayout does not satisfy this interface as written — its
// AddDir takes (string, os.FileMode) and its Archive takes (*archiver.TarZstd,
// string), while the interface declares AddDir() and Archive(archiver.Archiver)
// — confirm which side is stale.
type Layout interface {
	Create() error
	AddDir()
	Archive(archiver2 archiver.Archiver) error
	Remove() error
}
// NewLayout returns an FSLayout rooted at the absolute form of root, with no
// directories registered yet.
func NewLayout(root string) *FSLayout {
	absRoot, err := filepath.Abs(root)
	if err != nil {
		// Bug fix: the Abs error was silently discarded, which left Root as
		// "" on failure; fall back to the caller-supplied path instead.
		absRoot = root
	}
	return &FSLayout{
		Root: absRoot,
		dirs: nil,
	}
}
//Create will create the FSLayout at the FSLayout.Root
// Each registered dir is created (with any missing parents) under Root using
// its configured permission bits; the first error stops the walk.
func (l FSLayout) Create() error {
	for _, dir := range l.dirs {
		fullPath := filepath.Join(l.Root, dir.Path)
		if err := os.MkdirAll(fullPath, dir.Permission); err != nil {
			return err
		}
	}
	return nil
}
//AddDir will add a folder to the FSLayout
func (l *FSLayout) AddDir(relPath string, perm os.FileMode) {
	d := dir{Path: relPath, Permission: perm}
	l.dirs = append(l.dirs, d)
}
// Remove deletes the layout root and everything beneath it.
func (l FSLayout) Remove() error {
	return os.RemoveAll(l.Root)
}
// Archive writes a zstd-compressed tar of the entire layout to
// "<cwd>/<name>.<ext>", where ext comes from the archiver's String().
//
// It temporarily chdirs into the layout root so archive entries are relative
// to the root rather than absolute paths; the original working directory is
// restored on return (the restore error is ignored — best-effort).
func (l FSLayout) Archive(a *archiver.TarZstd, name string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	err = os.Chdir(l.Root)
	if err != nil {
		return err
	}
	defer os.Chdir(cwd)
	archiveFile := filepath.Join(cwd, fmt.Sprintf("%s.%s", name, a.String()))
	if err := a.Archive([]string{"."}, archiveFile); err != nil {
		return err
	}
	return nil
}
func LinesToSlice(r io.ReadCloser) ([]string, error) {
var lines []string
scanner := bufio.NewScanner(r)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}

11
testdata/chart-collection.yaml vendored Normal file
View File

@@ -0,0 +1,11 @@
---
apiVersion: collection.hauler.cattle.io/v1alpha1
kind: ThickCharts
metadata:
name: mythickchart
spec:
charts:
# charts are also fetched and served as OCI content (currently experimental in helm)
# HELM_EXPERIMENTAL_OCI=1 helm chart pull <hauler-registry>/loki:2.6.2
- name: loki
repoURL: https://grafana.github.io/helm-charts

51
testdata/contents.yaml vendored Normal file
View File

@@ -0,0 +1,51 @@
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Files
metadata:
name: myfile
spec:
files:
# hauler can save/redistribute files on disk
- ref: testdata/contents.yaml
# TODO: when directories are specified, they will be archived and stored as a file
# - ref: testdata/
# hauler can also fetch remote content, and will "smartly" identify filenames _when possible_
# filename below = "k3s-images.txt"
- ref: "https://github.com/k3s-io/k3s/releases/download/v1.22.2%2Bk3s2/k3s-images.txt"
# when filenames are not appropriate, a name should be specified
# this will still work, but default to a filename of "get.k3s.io"
- ref: https://get.k3s.io
name: get-k3s.sh
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Images
metadata:
name: myimage
spec:
images:
# images can be referenced by their tag
- ref: rancher/k3s:v1.22.2-k3s2
# or by their digest:
- ref: registry@sha256:42043edfae481178f07aa077fa872fcc242e276d302f4ac2026d9d2eb65b955f
---
apiVersion: content.hauler.cattle.io/v1alpha1
kind: Charts
metadata:
name: mychart
spec:
charts:
# charts are also fetched and served as OCI content (currently experimental in helm)
# HELM_EXPERIMENTAL_OCI=1 helm chart pull <hauler-registry>/loki:2.6.2
- name: loki
repoURL: https://grafana.github.io/helm-charts
    # version: ""  # when version is omitted, the latest chart version is used
# specific versions can also be used
- name: rancher
repoURL: https://releases.rancher.com/server-charts/latest
version: 2.6.2

View File

@@ -1,8 +0,0 @@
#defaultNamespace: fleet-system
#helm:
# chart: https://github.com/rancher/fleet/releases/download/v0.3.5/fleet-agent-0.3.5.tgz
# releaseName: fleet-agent
helm:
repo: https://charts.longhorn.io
chart: longhorn

View File

@@ -1,15 +0,0 @@
version: 2.1
jobs:
lint:
docker:
- image: twuni/helm:3.4.1
steps:
- checkout
- run:
command: helm lint --strict
name: lint
workflows:
version: 2
default:
jobs:
- lint

View File

@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -1,13 +0,0 @@
apiVersion: v1
appVersion: 2.7.1
description: A Helm chart for Docker Registry
home: https://hub.docker.com/_/registry/
icon: https://hub.docker.com/public/images/logos/mini-logo.svg
maintainers:
- email: devin@canterberry.cc
name: Devin Canterberry
url: https://canterberry.cc/
name: docker-registry
sources:
- https://github.com/docker/distribution-library-image
version: 1.10.1

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,94 +0,0 @@
# Docker Registry Helm Chart
This directory contains a Kubernetes chart to deploy a private Docker Registry.
## Prerequisites Details
* PV support on underlying infrastructure (if persistence is required)
## Chart Details
This chart will do the following:
* Implement a Docker registry deployment
## Installing the Chart
First, add the repo:
```console
$ helm repo add twuni https://helm.twun.io
```
To install the chart, use the following:
```console
$ helm install twuni/docker-registry
```
## Configuration
The following table lists the configurable parameters of the docker-registry chart and
their default values.
| Parameter | Description | Default |
|:----------------------------|:-------------------------------------------------------------------------------------------|:----------------|
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.repository` | Container image to use | `registry` |
| `image.tag` | Container image tag to deploy | `2.7.1` |
| `imagePullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` |
| `persistence.enabled` | Whether to use a PVC for the Docker storage | `false` |
| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest | `nil` |
| `persistence.size` | Amount of space to claim for PVC | `10Gi` |
| `persistence.storageClass` | Storage Class to use for PVC | `-` |
| `persistence.existingClaim` | Name of an existing PVC to use for config | `nil` |
| `service.port` | TCP port on which the service is exposed | `5000` |
| `service.type` | service type | `ClusterIP` |
| `service.clusterIP` | if `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service | `nil` |
| `service.nodePort` | if `service.type` is `NodePort` and this is non-empty, sets the node port of the service | `nil` |
| `service.loadBalancerIP` | if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerIP of the service | `nil` |
| `service.loadBalancerSourceRanges`| if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerSourceRanges of the service | `nil` |
| `service.sessionAffinity` | service session affinity | `nil` |
| `service.sessionAffinityConfig` | service session affinity config | `nil` |
| `replicaCount` | k8s replicas | `1` |
| `updateStrategy` | update strategy for deployment | `{}` |
| `podAnnotations` | Annotations for pod | `{}` |
| `podLabels` | Labels for pod | `{}` |
| `podDisruptionBudget` | Pod disruption budget | `{}` |
| `resources.limits.cpu` | Container requested CPU | `nil` |
| `resources.limits.memory` | Container requested memory | `nil` |
| `priorityClassName ` | priorityClassName | `""` |
| `storage` | Storage system to use | `filesystem` |
| `tlsSecretName` | Name of secret for TLS certs | `nil` |
| `secrets.htpasswd` | Htpasswd authentication | `nil` |
| `secrets.s3.accessKey` | Access Key for S3 configuration | `nil` |
| `secrets.s3.secretKey` | Secret Key for S3 configuration | `nil` |
| `secrets.swift.username` | Username for Swift configuration | `nil` |
| `secrets.swift.password` | Password for Swift configuration | `nil` |
| `haSharedSecret` | Shared secret for Registry | `nil` |
| `configData` | Configuration hash for docker | `nil` |
| `s3.region` | S3 region | `nil` |
| `s3.regionEndpoint` | S3 region endpoint | `nil` |
| `s3.bucket` | S3 bucket name | `nil` |
| `s3.encrypt` | Store images in encrypted format | `nil` |
| `s3.secure` | Use HTTPS | `nil` |
| `swift.authurl` | Swift authurl | `nil` |
| `swift.container` | Swift container | `nil` |
| `nodeSelector` | node labels for pod assignment | `{}` |
| `affinity` | affinity settings | `{}` |
| `tolerations` | pod tolerations | `[]` |
| `ingress.enabled` | If true, Ingress will be created | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Ingress labels | `{}` |
| `ingress.path` | Ingress service path | `/` |
| `ingress.hosts` | Ingress hostnames | `[]` |
| `ingress.tls` | Ingress TLS configuration (YAML) | `[]` |
| `extraVolumeMounts` | Additional volumeMounts to the registry container | `[]` |
| `extraVolumes` | Additional volumes to the pod | `[]` |
Specify each parameter using the `--set key=value[,key=value]` argument to
`helm install`.
To generate htpasswd file, run this docker command:
`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`.

View File

@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "docker-registry.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ template "docker-registry.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "docker-registry.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "docker-registry.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8080:5000
{{- end }}

View File

@@ -1,24 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "docker-registry.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "docker-registry.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "docker-registry.fullname" . }}-config
labels:
app: {{ template "docker-registry.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
data:
config.yml: |-
{{ toYaml .Values.configData | indent 4 }}

View File

@@ -1,221 +0,0 @@
{{/*
Deployment running the Docker registry. The storage backend selector
(.Values.storage: filesystem | azure | s3 | swift) drives which REGISTRY_*
environment variables are emitted; credentials are read from the chart's
companion "<fullname>-secret" Secret. Templated env values are piped through
`quote` so region names, bucket names, endpoints etc. cannot be re-typed or
broken by YAML parsing (e.g. boolean-looking or colon-containing strings).
NOTE(review): indentation reconstructed from the flattened diff view --
verify against `helm template` output.
*/ -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ template "docker-registry.name" . }}
      release: {{ .Release.Name }}
  replicas: {{ .Values.replicaCount }}
  {{- if .Values.updateStrategy }}
  strategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
  {{- end }}
  minReadySeconds: 5
  template:
    metadata:
      labels:
        app: {{ template "docker-registry.name" . }}
        release: {{ .Release.Name }}
        {{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
        {{- end }}
      annotations:
        {{- /* Hash of the rendered ConfigMap: forces a rolling update of the
               pods whenever the registry configuration changes. */}}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- if $.Values.podAnnotations }}
{{ toYaml $.Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: "{{ .Values.priorityClassName }}"
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - /bin/registry
            - serve
            - /etc/docker/registry/config.yml
          ports:
            - containerPort: 5000
          {{- /* Probes switch to HTTPS when a TLS secret is mounted, since the
                 registry then serves TLS on port 5000. */}}
          livenessProbe:
            httpGet:
              {{- if .Values.tlsSecretName }}
              scheme: HTTPS
              {{- end }}
              path: /
              port: 5000
          readinessProbe:
            httpGet:
              {{- if .Values.tlsSecretName }}
              scheme: HTTPS
              {{- end }}
              path: /
              port: 5000
          resources:
{{ toYaml .Values.resources | indent 12 }}
          env:
            {{- if .Values.secrets.htpasswd }}
            - name: REGISTRY_AUTH
              value: "htpasswd"
            - name: REGISTRY_AUTH_HTPASSWD_REALM
              value: "Registry Realm"
            - name: REGISTRY_AUTH_HTPASSWD_PATH
              value: "/auth/htpasswd"
            {{- end }}
            - name: REGISTRY_HTTP_SECRET
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: haSharedSecret
            {{- if .Values.tlsSecretName }}
            - name: REGISTRY_HTTP_TLS_CERTIFICATE
              value: /etc/ssl/docker/tls.crt
            - name: REGISTRY_HTTP_TLS_KEY
              value: /etc/ssl/docker/tls.key
            {{- end }}
            {{- if eq .Values.storage "filesystem" }}
            - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
              value: "/var/lib/registry"
            {{- else if eq .Values.storage "azure" }}
            - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureAccountName
            - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureAccountKey
            - name: REGISTRY_STORAGE_AZURE_CONTAINER
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureContainer
            {{- else if eq .Values.storage "s3" }}
            {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}
            - name: REGISTRY_STORAGE_S3_ACCESSKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: s3AccessKey
            - name: REGISTRY_STORAGE_S3_SECRETKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: s3SecretKey
            {{- end }}
            - name: REGISTRY_STORAGE_S3_REGION
              value: {{ required ".Values.s3.region is required" .Values.s3.region | quote }}
            {{- if .Values.s3.regionEndpoint }}
            - name: REGISTRY_STORAGE_S3_REGIONENDPOINT
              value: {{ .Values.s3.regionEndpoint | quote }}
            {{- end }}
            - name: REGISTRY_STORAGE_S3_BUCKET
              value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket | quote }}
            {{- if .Values.s3.encrypt }}
            - name: REGISTRY_STORAGE_S3_ENCRYPT
              value: {{ .Values.s3.encrypt | quote }}
            {{- end }}
            {{- if .Values.s3.secure }}
            - name: REGISTRY_STORAGE_S3_SECURE
              value: {{ .Values.s3.secure | quote }}
            {{- end }}
            {{- else if eq .Values.storage "swift" }}
            - name: REGISTRY_STORAGE_SWIFT_AUTHURL
              value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl | quote }}
            - name: REGISTRY_STORAGE_SWIFT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: swiftUsername
            - name: REGISTRY_STORAGE_SWIFT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: swiftPassword
            - name: REGISTRY_STORAGE_SWIFT_CONTAINER
              value: {{ required ".Values.swift.container is required" .Values.swift.container | quote }}
            {{- end }}
            {{- if .Values.persistence.deleteEnabled }}
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            {{- end }}
          volumeMounts:
            {{- if .Values.secrets.htpasswd }}
            - name: auth
              mountPath: /auth
              readOnly: true
            {{- end }}
            {{- if eq .Values.storage "filesystem" }}
            - name: data
              mountPath: /var/lib/registry/
            {{- end }}
            - name: "{{ template "docker-registry.fullname" . }}-config"
              mountPath: "/etc/docker/registry"
            {{- if .Values.tlsSecretName }}
            - mountPath: /etc/ssl/docker
              name: tls-cert
              readOnly: true
            {{- end }}
            {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      volumes:
        {{- if .Values.secrets.htpasswd }}
        - name: auth
          secret:
            secretName: {{ template "docker-registry.fullname" . }}-secret
            items:
              - key: htpasswd
                path: htpasswd
        {{- end }}
        {{- if eq .Values.storage "filesystem" }}
        - name: data
          {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end -}}
        {{- end }}
        - name: {{ template "docker-registry.fullname" . }}-config
          configMap:
            name: {{ template "docker-registry.fullname" . }}-config
        {{- if .Values.tlsSecretName }}
        - name: tls-cert
          secret:
            secretName: {{ .Values.tlsSecretName }}
        {{- end }}
        {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}

View File

@@ -1,36 +0,0 @@
{{/*
Ingress for the registry service; rendered only when .Values.ingress.enabled.
Targets the deprecated networking.k8s.io/v1beta1 (falling back to
extensions/v1beta1) API with the matching serviceName/servicePort backend
shape -- migrating to networking.k8s.io/v1 would require restructuring the
backend and adding pathType, so it is deliberately left as-is here.
NOTE(review): indentation reconstructed from the flattened diff view --
verify against `helm template` output.
*/ -}}
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "docker-registry.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $path := .Values.ingress.path -}}
apiVersion: {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }}
kind: Ingress
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
  annotations:
    {{- range $key, $value := .Values.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
spec:
  rules:
    {{- /* One rule per host, all sharing the same path and service backend. */}}
    {{- range $host := .Values.ingress.hosts }}
    - host: {{ $host }}
      http:
        paths:
          - path: {{ $path }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
    {{- end -}}
  {{- if .Values.ingress.tls }}
  tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
  {{- end -}}
{{- end -}}

Some files were not shown because too many files have changed in this diff. Show more