Merge pull request #18 from rancherfederal/feat/ci

For workflow testing
This commit is contained in:
Jennifer Power
2021-06-15 15:56:08 -04:00
committed by GitHub
63 changed files with 5179 additions and 1091 deletions

31
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,31 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
<!-- Thanks for helping us to improve Hauler! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
**Environmental Info:**
**Hauler Version:**
**System CPU architecture, OS, and Version:**
<!-- Provide the output from "uname -a" on the system where Hauler is installed -->
**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->
**Steps To Reproduce:**
**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->
**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->
**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->

23
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@@ -0,0 +1,23 @@
* **Please check if the PR fulfills these requirements**
- [ ] The commit message follows our guidelines
- [ ] Tests for the changes have been added (for bug fixes / features)
- [ ] Docs have been added / updated (for bug fixes / features)
* **What kind of change does this PR introduce?** (Bug fix, feature, docs update, ...)
* **What is the current behavior?** (You can also link to an open issue here)
* **What is the new behavior (if this is a feature change)?**
* **Does this PR introduce a breaking change?** (What changes might users need to make in their application due to this PR?)
* **Other information**:

78
.github/workflows/ci.yaml vendored Normal file
View File

@@ -0,0 +1,78 @@
name: CI

on:
  workflow_dispatch:

jobs:
  test:
    strategy:
      matrix:
        # Quoted so YAML keeps them as strings, not floats.
        go-version: ["1.15.x", "1.16.x"]
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
        run: go test ./...
      - name: Run vet & lint
        run: |
          go vet .
          golint .

  create-release:
    runs-on: ubuntu-latest
    # Only run for tag pushes.
    if: startsWith(github.ref, 'refs/tags/')
    outputs:
      upload_url: ${{ steps.goreleaser.outputs.upload_url }}
    # NOTE(review): this job never checks out the repository, but
    # goreleaser needs the git tree and tags — confirm a checkout step
    # (with fetch-depth: 0) is not required here.
    steps:
      - name: Download release notes utility
        env:
          GH_REL_URL: https://github.com/buchanae/github-release-notes/releases/download/0.2.0/github-release-notes-linux-amd64-0.2.0.tar.gz
        run: cd /tmp && curl -sSL ${GH_REL_URL} | tar xz && sudo mv github-release-notes /usr/local/bin/
      - name: Generate release notes
        run: |
          echo 'CHANGELOG' > /tmp/release.txt
          github-release-notes -org rancherfederal -repo hauler -since-latest-release -include-author >> /tmp/release.txt
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Run GoReleaser
        id: goreleaser
        uses: goreleaser/goreleaser-action@v1
        with:
          version: latest
          args: release --release-notes=/tmp/release.txt
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Upload assets
        uses: actions/upload-artifact@v2
        with:
          name: hauler
          path: dist/hauler_linux_amd64/haulerctl
          retention-days: 2

  create-artifacts:
    runs-on: ubuntu-latest
    needs: create-release
    steps:
      - name: Download Hauler binary
        id: download
        uses: actions/download-artifact@v2
      - name: Run Hauler bundle
        run: chmod +x haulerctl && ./haulerctl create
      - name: Upload default package to Release
        id: upload-release-asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          # FIX: was `needs,create-release` (comma) — an invalid
          # expression that could never resolve the upload URL.
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: ${{ github.workspace }}/haul.tar.zst
          asset_name: haul.tar.zst
          asset_content_type: application/zstd

9
.gitignore vendored
View File

@@ -15,3 +15,12 @@
artifacts
local-artifacts
airgap-scp.sh
# test artifacts
*.tar*
# generated
dist/
bundle/
tmp/
bin/

14
.goreleaser.yaml Normal file
View File

@@ -0,0 +1,14 @@
project_name: hauler

builds:
  # Single build of the haulerctl CLI for linux/darwin on amd64/arm64.
  - main: cmd/haulerctl/main.go
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64
    env:
      - CGO_ENABLED=0
    flags:
      # Space-separated build-tag list (Go <=1.16 syntax); disables the
      # containers/image gpgme and ostree cgo dependencies.
      - -tags=containers_image_openpgp containers_image_ostree

32
Makefile Normal file
View File

@@ -0,0 +1,32 @@
SHELL := /bin/bash
GO_BUILD_ENV = GOOS=linux GOARCH=amd64
GO_FILES = $(shell go list ./... | grep -v /vendor/)
BUILD_VERSION = $(shell cat VERSION)
BUILD_TAG = $(BUILD_VERSION)

.SILENT:

# None of these targets produce a file named after themselves.
.PHONY: all build install vet fmt test integration_test clean

all: fmt vet install test

build:
	# FIX: the recipe previously ended each line with ';\', so the final
	# line continuation swallowed the following 'install:' rule into the
	# build recipe; 'mkdir bin' also failed on a second run because the
	# directory already existed.
	mkdir -p bin
	$(GO_BUILD_ENV) go build -o bin ./cmd/...

install:
	$(GO_BUILD_ENV) go install

vet:
	go vet $(GO_FILES)

fmt:
	go fmt $(GO_FILES)

test:
	go test $(GO_FILES) -cover

integration_test:
	go test -tags=integration $(GO_FILES)

clean:
	rm -rf bin 2> /dev/null

View File

@@ -1,8 +1,14 @@
# Hauler Roadmap
## v0.0.x
- Install single-node k3s cluster into an Ubuntu machine using the tarball installation method
## v0.1.0
- Install single-node k3s cluster
- Support tarball and rpm installation methods
- Target narrow set of known Operating Systems to have OS-specific code if needed
- Serve container images
- Collect images from image list file
- Collect images from image archives

6
Vagrantfile vendored
View File

@@ -5,18 +5,18 @@
##################################
Vagrant.configure("2") do |config|
config.vm.box = "centos/7"
config.vm.box = "centos/8"
config.vm.hostname = "airgap"
config.vm.network "private_network", type: "dhcp"
config.vm.synced_folder ".", "/opt/hauler"
config.vm.synced_folder ".", "/vagrant"
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
vb.cpus = "2"
config.vm.provision "airgap", type: "shell", run: "always",
inline: "/opt/hauler/vagrant-scripts/airgap.sh airgap"
inline: "/vagrant/vagrant-scripts/airgap.sh airgap"
end
# SELinux is Enforcing by default.

View File

@@ -1,38 +0,0 @@
package app
import (
"github.com/spf13/cobra"
)
func NewDeployCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "deploy",
Short: "deploy all dependencies from a generated package",
Long: `deploy all dependencies in a generated package.
Given the package archive generated from the package command, deploy all needed
components to serve all packaged dependencies.`,
}
return cmd
}
type DeployOptions struct {
// ImageLists []string
// ImageArchives []string
}
// Complete takes the command arguments and infers any remaining options.
func (o *DeployOptions) Complete() error {
return nil
}
// Validate checks the provided set of options.
func (o *DeployOptions) Validate() error {
return nil
}
// Run performs the operation.
func (o *DeployOptions) Run() error {
return nil
}

View File

@@ -1,90 +0,0 @@
package app
import (
"fmt"
"io"
"log"
"os"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
)
func NewPackageCommand() *cobra.Command {
opts := &PackageOptions{}
cmd := &cobra.Command{
Use: "package",
Short: "package all dependencies into an installable archive",
Long: `package all dependencies into an archive used by deploy.
Container images, git repositories, and more, packaged and ready to be served within an air gap.`,
RunE: func(cmd *cobra.Command, args []string) error {
if err := opts.Complete(); err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
return opts.Run()
},
}
cmd.Flags().StringVar(
&opts.OutputFileName, "out-file", "hauler-package.tar.gz",
"specify the package's output location; '-' writes to standard out",
)
return cmd
}
type PackageOptions struct {
OutputFileName string
// ImageLists []string
// ImageArchives []string
}
// Complete takes the command arguments and infers any remaining options.
func (o *PackageOptions) Complete() error {
return nil
}
// Validate checks the provided set of options.
func (o *PackageOptions) Validate() error {
return nil
}
const (
k3sVersion = "v1.18.8+k3s1"
)
// Run performs the operation.
func (o *PackageOptions) Run() error {
var dst io.Writer
if o.OutputFileName == "-" {
dst = os.Stdout
} else {
dstFile, err := os.Create(o.OutputFileName)
if err != nil {
return fmt.Errorf("create output file: %v", err)
}
dst = dstFile
}
pconfig := packager.Config{
Destination: dst,
KubernetesVersion: "k3s:" + k3sVersion,
}
p, err := packager.New(pconfig)
if err != nil {
return fmt.Errorf("initialize packager: %v", err)
}
if err := p.Run(); err != nil {
log.Fatalln(err)
}
return nil
}

View File

@@ -1,23 +0,0 @@
package app
import (
"github.com/spf13/cobra"
)
func NewRootCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "hauler",
Short: "hauler provides airgap migration assitance",
Long: `hauler provides airgap migration assistance using k3s.
Choose your functionality and create a package when internet access is available,
then deploy the package into your air-gapped environment.`,
RunE: func(cmd *cobra.Command, _ []string) error {
return cmd.Help()
},
}
cmd.AddCommand(NewPackageCommand())
return cmd
}

View File

@@ -0,0 +1,109 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/bootstrap"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
"path/filepath"
"sigs.k8s.io/yaml"
)
// deployOpts carries the options for the bootstrap sub-command.
type deployOpts struct {
	*rootOpts

	// haulerDir is the directory hauler components are installed into.
	haulerDir string
}

// NewBootstrapCommand creates a new sub command of haulerctl that bootstraps a cluster
func NewBootstrapCommand() *cobra.Command {
	opts := &deployOpts{
		rootOpts: &ro,
	}

	cmd := &cobra.Command{
		Use:   "bootstrap",
		Short: "Single-command install of a k3s cluster with known tools running inside of it",
		Long: `Single-command install of a k3s cluster with known tools running inside of it. Tools
include an OCI registry and Git server`,
		Aliases: []string{"b", "boot"},
		// At least one positional argument (the package path) is required;
		// RunE indexes args[0] unconditionally.
		Args: cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return opts.Run(args[0])
		},
	}

	f := cmd.Flags()
	f.StringVarP(&opts.haulerDir, "hauler-dir", "", "/opt/hauler", "Directory to install hauler components in")

	return cmd
}
// Run performs the operation.
func (o *deployOpts) Run(packagePath string) error {
o.logger.Infof("Bootstrapping from '%s'", packagePath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
return err
}
defer os.Remove(tmpdir)
o.logger.Debugf("Using temporary working directory: %s", tmpdir)
a := packager.NewArchiver()
err = packager.Unpackage(a, packagePath, tmpdir)
if err != nil {
return err
}
bundleData, err := os.ReadFile(filepath.Join(tmpdir, "package.json"))
if err != nil {
return err
}
var p v1alpha1.Package
if err := yaml.Unmarshal(bundleData, &p); err != nil {
return err
}
d := v1alpha1.NewDriver(p.Spec.Driver.Kind)
bootLogger := o.logger.WithFields(logrus.Fields{
"driver": p.Spec.Driver.Kind,
})
b, err := bootstrap.NewBooter(tmpdir)
if err != nil {
return err
}
o.logger.Infof("Initializing package for driver: %s", p.Spec.Driver.Kind)
if err := b.Init(); err != nil {
return err
}
o.logger.Infof("Performing pre %s boot steps", p.Spec.Driver.Kind)
if err := b.PreBoot(ctx, d, bootLogger); err != nil {
return err
}
o.logger.Infof("Booting %s", p.Spec.Driver.Kind)
if err := b.Boot(ctx, d, bootLogger); err != nil {
return err
}
o.logger.Infof("Performing post %s boot steps", p.Spec.Driver.Kind)
if err := b.PostBoot(ctx, d, bootLogger); err != nil {
return err
}
o.logger.Infof("Success! You can access the cluster with '/opt/hauler/bin/kubectl'")
return nil
}

View File

@@ -0,0 +1,36 @@
package app
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// bundleOpts carries the persistent options shared by the bundle sub-commands.
type bundleOpts struct {
	// bundleDir locates the bundle directory on disk.
	bundleDir string
}

// NewBundleCommand creates a new sub command under
// haulerctl for bundling images and artifacts
func NewBundleCommand() *cobra.Command {
	opts := &bundleOpts{}

	cmd := &cobra.Command{
		Use:     "bundle",
		Short:   "bundle images or artifact for relocation",
		Long:    "",
		Aliases: []string{"b"},
		RunE: func(cmd *cobra.Command, args []string) error {
			// No default action: show help for the sub-commands.
			return cmd.Help()
		},
	}

	f := cmd.PersistentFlags()
	f.StringVarP(&opts.bundleDir, "bundledir", "b", "./bundle",
		"directory locating a bundle, if one exists we will append (./bundle)")

	cmd.AddCommand(NewBundleArtifactsCommand(opts))

	// NOTE(review): viper.AutomaticEnv is also called from initConfig in
	// root.go; the call here looks redundant — confirm and remove.
	viper.AutomaticEnv()

	return cmd
}

View File

@@ -0,0 +1,51 @@
package app
import (
"context"
"fmt"
"github.com/spf13/cobra"
)
type bundleArtifactsOpts struct {
bundle *bundleOpts
}
// NewBundleArtifactsCommand creates a new sub command of bundle for artifacts
func NewBundleArtifactsCommand(bundle *bundleOpts) *cobra.Command {
opts := &bundleArtifactsOpts{bundle: bundle}
cmd := &cobra.Command{
Use: "artifacts",
Short: "Choose a folder on disk, new artifact containing all of folder's contents",
RunE: func(cmd *cobra.Command, args []string) error {
return opts.Run()
},
}
return cmd
}
func (o *bundleArtifactsOpts) Run() error {
//TODO
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
//b := bundle.NewLayoutStore(o.bundleDir)
//
//images := []string{"alpine:latest", "registry:2.7.1"}
//
//for _, i := range images {
// if err := b.Add(ctx, i); err != nil {
// return err
// }
//}
_ = ctx
fmt.Println("bundle artifacts")
fmt.Println(o.bundle.bundleDir)
return nil
}

48
cmd/haulerctl/app/copy.go Normal file
View File

@@ -0,0 +1,48 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type copyOpts struct {
dir string
sourceRef string
}
// NewCopyCommand creates a new sub command under
// haulerctl for copying files to local disk
func NewCopyCommand() *cobra.Command {
	opts := &copyOpts{}

	cmd := &cobra.Command{
		Use:     "copy",
		Short:   "Download artifacts from OCI registry to local disk",
		Aliases: []string{"c", "cp"},
		// FIX: this validator was commented out, but RunE indexes args[0]
		// unconditionally — running `haulerctl copy` with no argument
		// panicked with an index-out-of-range instead of printing usage.
		Args: cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.sourceRef = args[0]
			return opts.Run(opts.sourceRef)
		},
	}

	f := cmd.Flags()
	f.StringVarP(&opts.dir, "dir", "d", ".", "Target directory for file copy")

	return cmd
}
// Run performs the operation.
func (o *copyOpts) Run(src string) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
if err := oci.Get(ctx, src, o.dir); err != nil {
logrus.Error(err)
}
return nil
}

119
cmd/haulerctl/app/create.go Normal file
View File

@@ -0,0 +1,119 @@
package app
import (
"context"
"os"
"github.com/pterm/pterm"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
)
type createOpts struct {
*rootOpts
driver string
outputFile string
configFile string
}
// NewCreateCommand creates a new sub command under
// haulerctl for creating dependency artifacts for bootstraps
func NewCreateCommand() *cobra.Command {
opts := &createOpts{
rootOpts: &ro,
}
cmd := &cobra.Command{
Use: "create",
Short: "package all dependencies into a compressed archive",
Long: `package all dependencies into a compressed archive used by deploy.
Container images, git repositories, and more, packaged and ready to be served within an air gap.`,
Aliases: []string{"c"},
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
return opts.Run()
},
}
f := cmd.Flags()
f.StringVarP(&opts.driver, "driver", "d", "k3s",
"Driver type to use for package (k3s or rke2)")
f.StringVarP(&opts.outputFile, "output", "o", "haul",
"package output location relative to the current directory (haul.tar.zst)")
f.StringVarP(&opts.configFile, "config", "c", "./package.yaml",
"config file")
return cmd
}
func (o *createOpts) PreRun() error {
return nil
}
// Run performs the operation.
func (o *createOpts) Run() error {
o.logger.Infof("Creating new deployable bundle using driver: %s", o.driver)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if _, err := os.Stat(o.configFile); err != nil {
logrus.Error(err)
}
bundleData, err := os.ReadFile(o.configFile)
if err != nil {
logrus.Error(err)
}
var p v1alpha1.Package
err = yaml.Unmarshal(bundleData, &p)
if err != nil {
logrus.Error(err)
}
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
logrus.Error(err)
}
defer os.RemoveAll(tmpdir)
pkgr := packager.NewPackager(tmpdir)
pb, _ := pterm.DefaultProgressbar.WithTotal(4).WithTitle("Start Packaging").Start()
o.logger.Infof("Packaging driver (%s %s) artifacts...", p.Spec.Driver.Version, p.Spec.Driver.Kind)
d := v1alpha1.NewDriver(p.Spec.Driver.Kind)
if err = pkgr.Driver(ctx, d); err != nil {
logrus.Error(err)
}
pb.Increment()
o.logger.Infof("Packaging fleet artifacts...")
if err = pkgr.Fleet(ctx, p.Spec.Fleet); err != nil {
logrus.Error(err)
}
pb.Increment()
o.logger.Infof("Packaging images and manifests defined in specified paths...")
if _, err = pkgr.Bundles(ctx, p.Spec.Paths...); err != nil {
logrus.Error(err)
}
pb.Increment()
a := packager.NewArchiver()
o.logger.Infof("Archiving and compressing package to: %s.%s", o.outputFile, a.String())
if err = pkgr.Archive(a, p, o.outputFile); err != nil {
logrus.Error(err)
}
pb.Increment()
return nil
}

View File

@@ -0,0 +1,78 @@
package app
import (
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"io/ioutil"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
"sigs.k8s.io/yaml"
"testing"
)
func Test_createOpts_Run(t *testing.T) {
l, _ := setupCliLogger(os.Stdout, "debug")
tro := rootOpts{l}
p := v1alpha1.Package{
TypeMeta: metav1.TypeMeta{
Kind: "Package",
APIVersion: "hauler.cattle.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: v1alpha1.PackageSpec{
Fleet: v1alpha1.Fleet{Version: "0.3.5"},
Driver: v1alpha1.Driver{
Kind: "k3s",
Version: "v1.21.1+k3s1",
},
Paths: []string{
"../../../testdata/docker-registry",
"../../../testdata/rawmanifests",
},
Images: []string{},
},
}
data, _ := yaml.Marshal(p)
if err := ioutil.WriteFile("create_test.package.yaml", data, 0644); err != nil {
t.Fatalf("failed to write test config file: %v", err)
}
defer os.Remove("create_test.package.yaml")
type fields struct {
rootOpts *rootOpts
driver string
outputFile string
configFile string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "should work",
fields: fields{
rootOpts: &tro,
driver: "k3s",
outputFile: "package",
configFile: "./create_test.package.yaml",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
o := &createOpts{
rootOpts: tt.fields.rootOpts,
driver: tt.fields.driver,
outputFile: tt.fields.outputFile,
configFile: tt.fields.configFile,
}
if err := o.Run(); (err != nil) != tt.wantErr {
t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

42
cmd/haulerctl/app/oci.go Normal file
View File

@@ -0,0 +1,42 @@
package app
import (
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/spf13/cobra"
)
// ociOpts carries the flags shared by the oci sub-commands.
type ociOpts struct {
	insecure  bool
	plainHTTP bool
}

const (
	// haulerMediaType is the OCI media type hauler uses for its artifacts.
	haulerMediaType = "application/vnd.oci.image"
)

// NewOCICommand creates the `oci` command group with push and pull
// sub-commands.
func NewOCICommand() *cobra.Command {
	opts := ociOpts{}

	cmd := &cobra.Command{
		Use:   "oci",
		Short: "oci stuff",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	cmd.AddCommand(NewOCIPushCommand())
	cmd.AddCommand(NewOCIPullCommand())

	f := cmd.Flags()
	// NOTE(review): these flags are bound to a local opts value that the
	// sub-commands never receive, and resolver() below reads neither
	// field — confirm whether --insecure/--plain-http should be wired up.
	f.BoolVarP(&opts.insecure, "insecure", "", false, "Connect to registry without certs")
	f.BoolVarP(&opts.plainHTTP, "plain-http", "", false, "Connect to registry over plain http")

	return cmd
}

// resolver returns a docker remotes.Resolver for registry access.
// NOTE(review): PlainHTTP is hardcoded to true here, ignoring the
// --plain-http flag above — confirm this is intentional.
func (o *ociOpts) resolver() (remotes.Resolver, error) {
	resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
	return resolver, nil
}

View File

@@ -0,0 +1,67 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type ociPullOpts struct {
ociOpts
sourceRef string
outDir string
}
func NewOCIPullCommand() *cobra.Command {
opts := ociPullOpts{}
cmd := &cobra.Command{
Use: "pull",
Short: "oci pull",
Aliases: []string{"p"},
Args: cobra.MinimumNArgs(1),
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
opts.sourceRef = args[0]
return opts.Run()
},
}
f := cmd.Flags()
f.StringVarP(&opts.outDir, "out-dir", "o", ".", "output directory")
return cmd
}
func (o *ociPullOpts) PreRun() error {
return nil
}
// Run pulls o.sourceRef from the remote registry into o.outDir,
// accepting only hauler's media type, and logs the pulled digest.
func (o *ociPullOpts) Run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := content.NewFileStore(o.outDir)
	defer store.Close()

	allowedMediaTypes := []string{
		haulerMediaType,
	}

	resolver, err := o.resolver()
	if err != nil {
		return err
	}

	desc, _, err := oras.Pull(ctx, resolver, o.sourceRef, store, oras.WithAllowedMediaTypes(allowedMediaTypes))
	// FIX: the pull error was previously discarded and desc.Digest was
	// logged (from a zero-value descriptor) even when the pull failed.
	if err != nil {
		return err
	}

	logrus.Infof("pulled %s with digest: %s", o.sourceRef, desc.Digest)
	return nil
}

View File

@@ -0,0 +1,74 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
)
type ociPushOpts struct {
ociOpts
targetRef string
pathRef string
}
func NewOCIPushCommand() *cobra.Command {
opts := ociPushOpts{}
cmd := &cobra.Command{
Use: "push",
Short: "oci push",
Aliases: []string{"p"},
Args: cobra.MinimumNArgs(2),
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
opts.pathRef = args[0]
opts.targetRef = args[1]
return opts.Run()
},
}
return cmd
}
func (o *ociPushOpts) PreRun() error {
return nil
}
func (o *ociPushOpts) Run() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
data, err := os.ReadFile(o.pathRef)
if err != nil {
return err
}
resolver, err := o.resolver()
if err != nil {
return err
}
store := content.NewMemoryStore()
contents := []ocispec.Descriptor{
store.Add(o.pathRef, haulerMediaType, data),
}
desc, err := oras.Push(ctx, resolver, o.targetRef, store, contents)
if err != nil {
return err
}
logrus.Infof("pushed %s to %s with digest: %s", o.pathRef, o.targetRef, desc.Digest)
return nil
}

View File

@@ -0,0 +1,34 @@
package app
import (
"github.com/spf13/cobra"
)
type relocateOpts struct {
inputFile string
}
// NewRelocateCommand creates a new sub command under
// haulterctl for relocating images and artifacts
func NewRelocateCommand() *cobra.Command {
opts := &relocateOpts{}
cmd := &cobra.Command{
Use: "relocate",
Short: "relocate images or artifacts to a registry",
Long: "",
Aliases: []string{"r"},
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
f := cmd.PersistentFlags()
f.StringVarP(&opts.inputFile, "input", "i", "haul.tar.zst",
"package output location relative to the current directory (haul.tar.zst)")
cmd.AddCommand(NewRelocateArtifactsCommand(opts))
cmd.AddCommand(NewRelocateImagesCommand(opts))
return cmd
}

View File

@@ -0,0 +1,64 @@
package app
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type relocateArtifactsOpts struct {
relocate *relocateOpts
destRef string
}
// NewRelocateArtifactsCommand creates a new sub command of relocate for artifacts
func NewRelocateArtifactsCommand(relocate *relocateOpts) *cobra.Command {
opts := &relocateArtifactsOpts{relocate: relocate}
cmd := &cobra.Command{
Use: "artifacts",
Short: "Use artifact from bundle artifacts to populate a target file server with the artifact's contents",
RunE: func(cmd *cobra.Command, args []string) error {
opts.destRef = args[0]
return opts.Run(opts.destRef)
},
}
return cmd
}
// Run unpacks the package archive named by --input into a temporary
// directory and pushes each top-level entry to the destination dst via
// oci.Put. Per-file push failures are logged and the remaining files are
// still attempted (best-effort, matching the original behavior).
func (o *relocateArtifactsOpts) Run(dst string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	ar := packager.NewArchiver()

	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		// FIX: the error was only logged before, and execution continued
		// with an empty tmpdir path.
		return err
	}
	// FIX: the temporary directory was never removed (resource leak).
	defer os.RemoveAll(tmpdir)

	// FIX: the Unpackage error was silently discarded.
	if err := packager.Unpackage(ar, o.relocate.inputFile, tmpdir); err != nil {
		return err
	}

	files, err := ioutil.ReadDir(tmpdir)
	if err != nil {
		return err
	}

	for _, f := range files {
		if err := oci.Put(ctx, filepath.Join(tmpdir, f.Name()), dst); err != nil {
			// Deliberate best-effort: keep pushing the remaining files.
			logrus.Error(err)
		}
	}

	return nil
}

View File

@@ -0,0 +1,89 @@
package app
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
)
type relocateImagesOpts struct {
relocate *relocateOpts
destRef string
}
// NewRelocateImagesCommand creates a new sub command of relocate for images
func NewRelocateImagesCommand(relocate *relocateOpts) *cobra.Command {
opts := &relocateImagesOpts{relocate: relocate}
cmd := &cobra.Command{
Use: "images",
Short: "Use artifact from bundle images to populate a target registry with the artifact's images",
RunE: func(cmd *cobra.Command, args []string) error {
opts.destRef = args[0]
return opts.Run(opts.destRef)
},
}
return cmd
}
// Run unpacks the package archive named by --input and copies every image
// found in its OCI layout to the destination registry dst, preserving the
// image path after the first "/" segment.
func (o *relocateImagesOpts) Run(dst string) error {
	ar := packager.NewArchiver()

	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return err
	}
	// FIX: the temporary directory was never removed (resource leak).
	defer os.RemoveAll(tmpdir)

	// FIX: Unpackage's error was discarded; the old code then re-checked
	// the stale (nil) err left over from MkdirTemp.
	if err := packager.Unpackage(ar, o.relocate.inputFile, tmpdir); err != nil {
		return err
	}

	path := filepath.Join(tmpdir, "layout")
	ly, err := layout.FromPath(path)
	if err != nil {
		return err
	}

	for nm, hash := range oci.ListImages(ly) {
		// Split "registry/rest-of-name"; n[1] keeps the repository path.
		n := strings.SplitN(nm, "/", 2)

		img, err := ly.Image(hash)
		if err != nil {
			return err
		}
		fmt.Printf("Copy %s to %s\n", n[1], dst)

		dstimg := dst + "/" + n[1]
		tag, err := name.ParseReference(dstimg)
		if err != nil {
			return err
		}

		if err := remote.Write(tag, img); err != nil {
			return err
		}
	}

	return nil
}

120
cmd/haulerctl/app/root.go Normal file
View File

@@ -0,0 +1,120 @@
package app
import (
"fmt"
"io"
"os"
"time"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
var (
cfgFile string
loglevel string
timeout time.Duration
getLong = `haulerctl provides CLI-based air-gap migration assistance using k3s.
Choose your functionality and new a package when internet access is available,
then deploy the package into your air-gapped environment.
`
getExample = `
# Run Hauler
haulerctl bundle images <images>
haulerctl bundle artifacts <artfiacts>
haulerctl relocate artifacts -i <package-name>
haulerctl relocate images -i <package-name> locahost:5000
haulerctl copy
haulerctl create
haulerctl bootstrap`
)
type rootOpts struct {
logger log.Logger
}
var ro rootOpts
// NewRootCommand defines the root haulerctl command
func NewRootCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "haulerctl",
Short: "haulerctl provides CLI-based air-gap migration assistance",
Long: getLong,
Example: getExample,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
l, err := setupCliLogger(os.Stdout, loglevel)
if err != nil {
return err
}
ro.logger = l
return nil
},
RunE: func(cmd *cobra.Command, _ []string) error {
return cmd.Help()
},
}
cobra.OnInitialize(initConfig)
cmd.AddCommand(NewRelocateCommand())
cmd.AddCommand(NewCreateCommand())
cmd.AddCommand(NewBundleCommand())
cmd.AddCommand(NewCopyCommand())
cmd.AddCommand(NewBootstrapCommand())
f := cmd.PersistentFlags()
f.StringVarP(&loglevel, "loglevel", "l", "info",
"Log level (debug, info, warn, error, fatal, panic)")
f.StringVarP(&cfgFile, "config", "c", "./hauler.yaml",
"config file (./hauler.yaml)")
f.DurationVar(&timeout, "timeout", 1*time.Minute,
"timeout for operations")
return cmd
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
cobra.CheckErr(err)
// Search config in home directory with name ".hauler" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".hauler")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
}
}
// setupCliLogger constructs a log.Logger that writes to out at the given
// logrus level name (e.g. "debug", "info"). It returns an error when the
// level string cannot be parsed.
func setupCliLogger(out io.Writer, level string) (log.Logger, error) {
	lvl, err := logrus.ParseLevel(level)
	if err != nil {
		return nil, err
	}

	logger := log.NewLogger(out)
	logger.SetLevel(lvl)

	return logger, nil
}

View File

@@ -3,7 +3,7 @@ package main
import (
"log"
"github.com/rancherfederal/hauler/cmd/hauler/app"
"github.com/rancherfederal/hauler/cmd/haulerctl/app"
)
func main() {

69
go.mod
View File

@@ -1,8 +1,71 @@
module github.com/rancherfederal/hauler
go 1.14
go 1.16
require (
github.com/google/go-containerregistry v0.1.2
github.com/spf13/cobra v1.0.0
cloud.google.com/go/storage v1.8.0 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/containerd/containerd v1.5.0-beta.4
github.com/containerd/continuity v0.1.0 // indirect
github.com/containers/image/v5 v5.12.0
github.com/deislabs/oras v0.11.1
github.com/docker/docker v20.10.6+incompatible // indirect
github.com/google/go-containerregistry v0.4.1
github.com/google/uuid v1.2.0 // indirect
github.com/imdario/mergo v0.3.12
github.com/klauspost/compress v1.13.0 // indirect
github.com/mholt/archiver/v3 v3.5.0
github.com/mitchellh/go-homedir v1.1.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/otiai10/copy v1.6.0
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/pterm/pterm v0.12.23
github.com/rancher/fleet v0.3.5
github.com/rancher/fleet/pkg/apis v0.0.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/afero v1.6.0
github.com/spf13/cobra v1.1.3
github.com/spf13/viper v1.7.0
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
google.golang.org/genproto v0.0.0-20210524171403-669157292da3 // indirect
google.golang.org/grpc v1.38.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
helm.sh/helm/v3 v3.5.1
k8s.io/apimachinery v0.21.1
k8s.io/cli-runtime v0.20.2
k8s.io/client-go v11.0.1-0.20190816222228-6d55c1b1f1ca+incompatible
sigs.k8s.io/cli-utils v0.23.1
sigs.k8s.io/controller-runtime v0.9.0
sigs.k8s.io/yaml v1.2.0
)
replace (
github.com/rancher/fleet/pkg/apis v0.0.0 => github.com/rancher/fleet/pkg/apis v0.0.0-20210604212701-3a76c78716ab
helm.sh/helm/v3 => github.com/rancher/helm/v3 v3.3.3-fleet1
k8s.io/api => k8s.io/api v0.20.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.2 // indirect
k8s.io/apimachinery => k8s.io/apimachinery v0.20.2 // indirect
k8s.io/apiserver => k8s.io/apiserver v0.20.2
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.2
k8s.io/client-go => github.com/rancher/client-go v0.20.0-fleet1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.2
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.2
k8s.io/code-generator => k8s.io/code-generator v0.20.2
k8s.io/component-base => k8s.io/component-base v0.20.2
k8s.io/component-helpers => k8s.io/component-helpers v0.20.2
k8s.io/controller-manager => k8s.io/controller-manager v0.20.2
k8s.io/cri-api => k8s.io/cri-api v0.20.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.2
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.2
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.2
k8s.io/kubectl => k8s.io/kubectl v0.20.2
k8s.io/kubelet => k8s.io/kubelet v0.20.2
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.2
k8s.io/metrics => k8s.io/metrics v0.20.2
k8s.io/mount-utils => k8s.io/mount-utils v0.20.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.2
)

1633
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,96 @@
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/cli-utils/pkg/object"
)
// Drive abstracts a Kubernetes distribution driver (k3s, rke2): where its
// binary comes from, which images it requires, and where it keeps its state
// and configuration on disk.
type Drive interface {
// Images returns the container image refs the driver needs at runtime.
Images() ([]string, error)
// BinURL returns the download URL for the driver binary.
BinURL() string
// LibPath returns the driver's data directory (e.g. /var/lib/rancher/k3s).
LibPath() string
// EtcPath returns the driver's config directory (e.g. /etc/rancher/k3s).
EtcPath() string
// Config returns the driver's default configuration map.
Config() (*map[string]interface{}, error)
// SystemObjects lists the core workloads to wait on after boot.
SystemObjects() (objs []object.ObjMetadata)
}
// Driver selects which distribution (Kind) and version a package targets.
type Driver struct {
Kind string `json:"kind"`
Version string `json:"version"`
}
// k3s is the k3s implementation of Drive.
// NOTE(review): dataDir/etcDir are set by NewDriver but the LibPath/EtcPath
// methods below return hard-coded paths instead — confirm intent.
type k3s struct {
dataDir string
etcDir string
}
// BinURL returns the pinned download URL for the k3s binary.
//TODO: Don't hardcode this
func (k k3s) BinURL() string {
	const pinned = "https://github.com/k3s-io/k3s/releases/download/v1.21.1%2Bk3s1/k3s"
	return pinned
}
// Images returns the container images required by this pinned k3s release.
//TODO: Replace this with a query to images.txt on release page
func (k k3s) Images() ([]string, error) {
	imgs := []string{
		"docker.io/rancher/coredns-coredns:1.8.3",
		"docker.io/rancher/klipper-helm:v0.5.0-build20210505",
		"docker.io/rancher/klipper-lb:v0.2.0",
		"docker.io/rancher/library-busybox:1.32.1",
		"docker.io/rancher/library-traefik:2.4.8",
		"docker.io/rancher/local-path-provisioner:v0.0.19",
		"docker.io/rancher/metrics-server:v0.3.6",
		"docker.io/rancher/pause:3.1",
	}
	return imgs, nil
}
// Config returns the default k3s configuration map.
// TODO: This should be typed
func (k k3s) Config() (*map[string]interface{}, error) {
	cfg := map[string]interface{}{
		"write-kubeconfig-mode": "0644",
		//TODO: Add uid or something to ensure this works for multi-node setups
		"node-name": "hauler",
	}
	return &cfg, nil
}
// SystemObjects identifies the core kube-system deployments shipped with k3s
// that callers should wait on after boot.
//TODO: Make sure this matches up with specified config disables
func (k k3s) SystemObjects() []object.ObjMetadata {
	deployments := []string{"coredns", "local-path-provisioner", "metrics-server"}
	objs := make([]object.ObjMetadata, 0, len(deployments))
	for _, name := range deployments {
		meta, _ := object.CreateObjMetadata("kube-system", name, schema.GroupKind{Kind: "Deployment", Group: "apps"})
		objs = append(objs, meta)
	}
	return objs
}
// LibPath returns the driver's data directory. FIX: the struct's dataDir
// field (set by NewDriver) was previously ignored in favor of a hard-coded
// path; we now prefer the field and fall back to the k3s default so a
// zero-value k3s behaves exactly as before.
func (k k3s) LibPath() string {
	if k.dataDir != "" {
		return k.dataDir
	}
	return "/var/lib/rancher/k3s"
}

// EtcPath returns the driver's config directory, preferring the etcDir field
// and falling back to the k3s default (previous hard-coded behavior).
func (k k3s) EtcPath() string {
	if k.etcDir != "" {
		return k.etcDir
	}
	return "/etc/rancher/k3s"
}
//TODO: Implement rke2 as a driver
// rke2 is a placeholder Drive implementation; every method returns a zero
// value until rke2 support lands.
type rke2 struct{}
func (r rke2) Images() ([]string, error) { return []string{}, nil }
func (r rke2) BinURL() string { return "" }
func (r rke2) LibPath() string { return "" }
func (r rke2) EtcPath() string { return "" }
func (r rke2) Config() (*map[string]interface{}, error) { return nil, nil }
func (r rke2) SystemObjects() (objs []object.ObjMetadata) { return objs }
// NewDriver will return the appropriate driver given a kind, defaults to k3s
func NewDriver(kind string) Drive {
	switch kind {
	case "rke2":
		//TODO
		return rke2{}
	default:
		// Any unknown kind falls back to k3s with its standard paths.
		return k3s{
			dataDir: "/var/lib/rancher/k3s",
			etcDir:  "/etc/rancher/k3s",
		}
	}
}

View File

@@ -0,0 +1,21 @@
package v1alpha1
import "fmt"
// Fleet is used as the deployment engine for all things Hauler
type Fleet struct {
	// Version of fleet to package and use in deployment
	Version string `json:"version"`
}

// Images returns the container images fleet requires.
//TODO: These should be identified from the chart version
func (f Fleet) Images() ([]string, error) {
	return []string{"rancher/gitjob:v0.1.15", "rancher/fleet:v0.3.5", "rancher/fleet-agent:v0.3.5"}, nil
}

// CRDChart returns the download URL of the fleet CRD chart for f.Version.
// BUG FIX: the release tag was hard-coded to v0.3.5 while the asset name used
// f.Version, producing dead URLs for any other version; both now track f.Version.
func (f Fleet) CRDChart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/v%s/fleet-crd-%s.tgz", f.Version, f.Version)
}

// Chart returns the download URL of the fleet chart for f.Version.
func (f Fleet) Chart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/v%s/fleet-%s.tgz", f.Version, f.Version)
}

View File

@@ -0,0 +1,53 @@
package v1alpha1
import (
"os"
"path/filepath"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// Well-known directory and file names inside a hauler package on disk.
const (
BundlesDir = "bundles"
LayoutDir = "layout"
BinDir = "bin"
ChartDir = "charts"
PackageFile = "package.json"
)
// Package is the top-level hauler package manifest (stored as PackageFile).
type Package struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PackageSpec `json:"spec"`
}
// PackageSpec describes what a package contains and how it is deployed.
type PackageSpec struct {
// Fleet pins the fleet deployment engine version.
Fleet Fleet `json:"fleet"`
// Driver selects the Kubernetes distribution to boot.
Driver Driver `json:"driver"`
// Paths is the list of directories relative to the working directory contains all resources to be bundled.
// path globbing is supported, for example [ "charts/*" ] will match all folders as a subdirectory of charts/
// If empty, "/" is the default
Paths []string `json:"paths,omitempty"`
// Images are extra image refs to package beyond those autodetected.
Images []string `json:"images,omitempty"`
}
// LoadPackageFromDir will load an existing package from a directory on disk,
// it fails if no PackageFile is found in dir.
func LoadPackageFromDir(path string) (Package, error) {
	var p Package
	data, err := os.ReadFile(filepath.Join(path, PackageFile))
	if err != nil {
		return p, err
	}
	if err := yaml.Unmarshal(data, &p); err != nil {
		return Package{}, err
	}
	return p, nil
}

230
pkg/bootstrap/booter.go Normal file
View File

@@ -0,0 +1,230 @@
package bootstrap
import (
"bytes"
"context"
"fmt"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/imdario/mergo"
"github.com/otiai10/copy"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/fs"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/sirupsen/logrus"
"helm.sh/helm/v3/pkg/chart/loader"
"io"
"k8s.io/cli-runtime/pkg/genericclioptions"
"os"
"os/exec"
"path/filepath"
"sigs.k8s.io/yaml"
)
// Booter drives the lifecycle of standing a cluster up from a hauler package.
// FIX: the concrete booter methods below take (context.Context,
// v1alpha1.Drive, log.Logger), so the original interface signatures
// (PreBoot(context.Context), Boot(context.Context, v1alpha1.Drive), ...)
// were never satisfied by booter; the interface now matches the implementation.
type Booter interface {
	// Init stages binaries, images, bundles, and charts onto the node.
	Init() error
	// PreBoot writes driver configuration before the driver starts.
	PreBoot(context.Context, v1alpha1.Drive, log.Logger) error
	// Boot installs/starts the driver and waits for its core components.
	Boot(context.Context, v1alpha1.Drive, log.Logger) error
	// PostBoot installs fleet onto the freshly booted cluster.
	PostBoot(context.Context, v1alpha1.Drive, log.Logger) error
}
// booter implements package bootstrap against an unpacked hauler package:
// Package is the loaded manifest, fs the package-rooted filesystem.
type booter struct {
Package v1alpha1.Package
fs fs.PkgFs
}
// NewBooter will build a new booter given a path to a directory containing a
// hauler package.json.
func NewBooter(pkgPath string) (*booter, error) {
	loaded, err := v1alpha1.LoadPackageFromDir(pkgPath)
	if err != nil {
		return nil, err
	}
	b := &booter{
		Package: loaded,
		fs:      fs.NewPkgFS(pkgPath),
	}
	return b, nil
}
// Init stages everything the driver needs onto the node: binaries, images,
// bundles, and charts, stopping at the first failure.
func (b booter) Init() error {
	d := v1alpha1.NewDriver(b.Package.Spec.Driver.Kind)

	//TODO: Feel like there's a better way to do this
	steps := []func() error{
		b.moveBin,
		func() error { return b.moveImages(d) },
		func() error { return b.moveBundles(d) },
		func() error { return b.moveCharts(d) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// PreBoot writes the driver configuration ahead of the driver starting.
// ctx is currently unused but kept for interface symmetry.
func (b booter) PreBoot(ctx context.Context, d v1alpha1.Drive, logger log.Logger) error {
	l := logger.WithFields(logrus.Fields{"phase": "preboot"})
	l.Infof("Creating driver configuration")
	return b.writeConfig(d)
}
// Boot runs the driver install script and waits for the driver's core
// components to provision.
func (b booter) Boot(ctx context.Context, d v1alpha1.Drive, logger log.Logger) error {
	l := logger.WithFields(logrus.Fields{
		"phase": "boot",
	})

	//TODO: Generic — the install script and env vars are k3s-specific.
	cmd := exec.Command("/bin/sh", "/opt/hauler/bin/k3s-init.sh")
	cmd.Env = append(os.Environ(), []string{
		"INSTALL_K3S_SKIP_DOWNLOAD=true",
		"INSTALL_K3S_SELINUX_WARN=true",
		"INSTALL_K3S_SKIP_SELINUX_RPM=true",
		"INSTALL_K3S_BIN_DIR=/opt/hauler/bin",
		//TODO: Provide a real dryrun option
		//"INSTALL_K3S_SKIP_START=true",
	}...)

	// Stream script output to the console while also capturing it.
	var stdoutBuf, stderrBuf bytes.Buffer
	cmd.Stdout = io.MultiWriter(os.Stdout, &stdoutBuf)
	cmd.Stderr = io.MultiWriter(os.Stderr, &stderrBuf)

	if err := cmd.Run(); err != nil {
		return err
	}

	l.Infof("Driver successfully started!")
	l.Infof("Waiting for driver core components to provision...")

	// BUG FIX: the original returned the (already-nil) install error here
	// instead of waitErr, silently swallowing provisioning timeouts.
	if waitErr := waitForDriver(ctx, d); waitErr != nil {
		return waitErr
	}
	return nil
}
// PostBoot installs fleet onto the newly booted cluster: CRDs first, then the
// fleet chart, both into the fleet-system namespace using the driver's
// kubeconfig.
func (b booter) PostBoot(ctx context.Context, d v1alpha1.Drive, logger log.Logger) error {
	l := logger.WithFields(logrus.Fields{"phase": "postboot"})

	cf := genericclioptions.NewConfigFlags(true)
	cf.KubeConfig = stringptr(fmt.Sprintf("%s/k3s.yaml", d.EtcPath()))

	version := b.Package.Spec.Fleet.Version

	// CRDs must exist before the fleet chart that depends on them.
	crdChart, err := loader.Load(b.fs.Chart().Path(fmt.Sprintf("fleet-crd-%s.tgz", version)))
	if err != nil {
		return err
	}
	l.Infof("Installing fleet crds")
	crdRelease, err := installChart(cf, crdChart, "fleet-crd", "fleet-system", nil)
	if err != nil {
		return err
	}
	l.Infof("Successfully installed '%s' to namespace '%s'", crdRelease.Name, crdRelease.Namespace)

	fleetChart, err := loader.Load(b.fs.Chart().Path(fmt.Sprintf("fleet-%s.tgz", version)))
	if err != nil {
		return err
	}
	l.Infof("Installing fleet")
	fleetRelease, err := installChart(cf, fleetChart, "fleet", "fleet-system", nil)
	if err != nil {
		return err
	}
	l.Infof("Successfully installed '%s' to namespace '%s'", fleetRelease.Name, fleetRelease.Namespace)
	return nil
}
//TODO: Move* will actually just copy. This is more expensive, but is much safer/easier at handling deep merges, should this change?

// moveBin copies the packaged binaries into /opt/hauler/bin.
func (b booter) moveBin() error {
	dst := filepath.Join("/opt/hauler/bin")
	if err := os.MkdirAll(dst, os.ModePerm); err != nil {
		return err
	}
	return copy.Copy(b.fs.Bin().Path(), dst)
}
// moveImages writes all packaged images into the driver's agent image dir as
// a single multi-ref tarball, which the driver imports on startup.
func (b booter) moveImages(d v1alpha1.Drive) error {
	//NOTE: archives are not recursively searched, this _must_ be at the images dir
	dst := filepath.Join(d.LibPath(), "agent/images")
	if err := os.MkdirAll(dst, 0700); err != nil {
		return err
	}

	refs, err := b.fs.MapLayout()
	if err != nil {
		return err
	}
	return tarball.MultiRefWriteToFile(filepath.Join(dst, "hauler.tar"), refs)
}
// moveBundles copies the package's fleet bundles into the driver's manifests
// directory, which the driver auto-applies at boot.
func (b booter) moveBundles(d v1alpha1.Drive) error {
	path := filepath.Join(d.LibPath(), "server/manifests/hauler")
	// BUG FIX: previously only d.LibPath() itself was created, so the nested
	// server/manifests/hauler target could be missing when copying into it.
	if err := os.MkdirAll(path, 0700); err != nil {
		return err
	}
	return copy.Copy(b.fs.Bundle().Path(), path)
}
// moveCharts copies the packaged charts into the driver's static chart dir.
func (b booter) moveCharts(d v1alpha1.Drive) error {
	dst := filepath.Join(d.LibPath(), "server/static/charts/hauler")
	if err := os.MkdirAll(dst, 0700); err != nil {
		return err
	}
	return copy.Copy(b.fs.Chart().Path(), dst)
}
// writeConfig merges the driver's generated config with any user-provided
// config.yaml already on disk and writes the result back.
func (b booter) writeConfig(d v1alpha1.Drive) error {
	if err := os.MkdirAll(d.EtcPath(), os.ModePerm); err != nil {
		return err
	}

	c, err := d.Config()
	if err != nil {
		return err
	}

	path := filepath.Join(d.EtcPath(), "config.yaml")

	// BUG FIX: the original unmarshalled only when ReadFile FAILED
	// (err != nil), so existing user config was never actually read.
	var uc map[string]interface{}
	if data, err := os.ReadFile(path); err == nil {
		if err := yaml.Unmarshal(data, &uc); err != nil {
			return err
		}
	}

	//Merge with user defined configs taking precedence
	// NOTE(review): default mergo.Merge does not override non-empty dst (c)
	// values, so driver values may actually win — verify against the mergo
	// version in use; may need mergo.WithOverride or swapped arguments.
	if err := mergo.Merge(c, uc); err != nil {
		return err
	}

	data, err := yaml.Marshal(c)
	if err != nil {
		// BUG FIX: the marshal error was previously ignored.
		return err
	}
	return os.WriteFile(path, data, 0644)
}

67
pkg/bootstrap/kube.go Normal file
View File

@@ -0,0 +1,67 @@
package bootstrap
import (
"context"
"errors"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/kube"
log "github.com/sirupsen/logrus"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/release"
"k8s.io/cli-runtime/pkg/genericclioptions"
"os"
"path/filepath"
"time"
)
// waitForDriver blocks until the driver's kubeconfig appears on disk (up to a
// 2-minute deadline) and the driver's core system objects report ready.
func waitForDriver(ctx context.Context, d v1alpha1.Drive) error {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	//TODO: This is a janky way of waiting for file to exist
	kubeconfig := filepath.Join(d.EtcPath(), "k3s.yaml")
	for {
		if _, err := os.Stat(kubeconfig); err == nil {
			break
		}
		if ctx.Err() == context.DeadlineExceeded {
			return errors.New("timed out waiting for driver to provision")
		}
		time.Sleep(1 * time.Second)
	}

	cfg, err := kube.NewKubeConfig()
	if err != nil {
		return err
	}

	// Poll every 5s, give component readiness up to 5 minutes.
	sc, err := kube.NewStatusChecker(cfg, 5*time.Second, 5*time.Minute)
	if err != nil {
		return err
	}
	return sc.WaitForCondition(d.SystemObjects()...)
}
//TODO: This is likely way too fleet specific
// installChart installs an already-loaded helm chart as releaseName into
// namespace (created if missing), blocking until resources are ready.
// vals are passed through as chart values; the HELM_DRIVER env var selects
// helm's storage backend.
func installChart(cf *genericclioptions.ConfigFlags, chart *chart.Chart, releaseName, namespace string, vals map[string]interface{}) (*release.Release, error) {
actionConfig := new(action.Configuration)
if err := actionConfig.Init(cf, namespace, os.Getenv("HELM_DRIVER"), log.Debugf); err != nil {
return nil, err
}
client := action.NewInstall(actionConfig)
client.ReleaseName = releaseName
client.Namespace, cf.Namespace = namespace, stringptr(namespace) // TODO: Not sure why this needs to be set twice
client.CreateNamespace = true
client.Wait = true
return client.Run(chart, vals)
}
// stringptr returns a pointer to a copy of val; helm's API takes *string in
// several places and Go has no address-of-literal syntax.
func stringptr(val string) *string {
	s := val
	return &s
}

View File

@@ -1 +0,0 @@
package deployer

218
pkg/fs/fs.go Normal file
View File

@@ -0,0 +1,218 @@
package fs
import (
"fmt"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/spf13/afero"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"k8s.io/apimachinery/pkg/util/json"
)
// PkgFs is a filesystem rooted at a hauler package directory: FS confines
// operations to the package dir, root is its absolute path on disk.
type PkgFs struct {
FS *afero.BasePathFs
root string
}
// NewPkgFS returns a PkgFs rooted at dir.
func NewPkgFS(dir string) PkgFs {
	var p PkgFs
	p.FS = afero.NewBasePathFs(afero.NewOsFs(), dir).(*afero.BasePathFs)
	// FIX: the Abs error was previously discarded, leaving root == "" when
	// Abs fails (e.g. deleted cwd); fall back to the relative path instead.
	abs, err := filepath.Abs(dir)
	if err != nil {
		abs = dir
	}
	p.root = abs
	return p
}
// Path joins elem onto the package's absolute root directory.
func (p PkgFs) Path(elem ...string) string {
	parts := append([]string{p.root}, elem...)
	return filepath.Join(parts...)
}
// sub returns a PkgFs rooted at the named subdirectory of this package.
// Consolidates the previously duplicated Bundle/Image/Bin/Chart bodies.
func (p PkgFs) sub(dir string) PkgFs {
	return PkgFs{
		FS:   afero.NewBasePathFs(p.FS, dir).(*afero.BasePathFs),
		root: p.Path(dir),
	}
}

// Bundle returns the fs rooted at the bundles directory.
func (p PkgFs) Bundle() PkgFs { return p.sub(v1alpha1.BundlesDir) }

// Image returns the fs rooted at the OCI layout directory.
func (p PkgFs) Image() PkgFs { return p.sub(v1alpha1.LayoutDir) }

// Bin returns the fs rooted at the bin directory.
func (p PkgFs) Bin() PkgFs { return p.sub(v1alpha1.BinDir) }

// Chart returns the fs rooted at the charts directory.
func (p PkgFs) Chart() PkgFs { return p.sub(v1alpha1.ChartDir) }
// AddBundle will add a bundle to a package and all images that are
// autodetected from it.
func (p PkgFs) AddBundle(b *fleetapi.Bundle) error {
	if err := p.mkdirIfNotExists(v1alpha1.BundlesDir, os.ModePerm); err != nil {
		return err
	}

	data, err := json.Marshal(b)
	if err != nil {
		return err
	}
	if err := p.Bundle().WriteFile(fmt.Sprintf("%s.json", b.Name), data, 0644); err != nil {
		return err
	}

	// Pull every image the bundle's rendered resources reference into the layout.
	imgs, err := images.ImageMapFromBundle(b)
	if err != nil {
		return err
	}
	for ref, img := range imgs {
		if err := p.AddImage(ref, img); err != nil {
			return err
		}
	}
	return nil
}
// AddBin stores an executable (0755) under the package's bin dir.
func (p PkgFs) AddBin(r io.Reader, name string) error {
	if err := p.mkdirIfNotExists(v1alpha1.BinDir, os.ModePerm); err != nil {
		return err
	}
	f, err := p.Bin().FS.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return err
	}
	// BUG FIX: the file handle was never closed (descriptor leak; buffered
	// writes could be lost). Surface the close error if the copy succeeded.
	_, err = io.Copy(f, r)
	if cerr := f.Close(); cerr != nil && err == nil {
		err = cerr
	}
	return err
}
//AddImage will add an image to the pkgfs in OCI layout fmt
//TODO: Extra work is done to ensure this is unique within the index.json
func (p PkgFs) AddImage(ref name.Reference, img v1.Image) error {
	if err := p.mkdirIfNotExists(v1alpha1.LayoutDir, os.ModePerm); err != nil {
		return err
	}

	lp, err := p.layout()
	if err != nil {
		return err
	}

	// Record the fully-qualified ref so the layout index maps back to names.
	annotations := map[string]string{ocispec.AnnotationRefName: ref.Name()}

	//TODO: Change to ReplaceImage
	return lp.AppendImage(img, layout.WithAnnotations(annotations))
}
//TODO: Not very robust
//For ref: https://github.com/helm/helm/blob/bf486a25cdc12017c7dac74d1582a8a16acd37ea/pkg/action/pull.go#L75
// AddChart downloads a chart (ref at version) into the package's charts dir.
func (p PkgFs) AddChart(ref string, version string) error {
	if err := p.mkdirIfNotExists(v1alpha1.ChartDir, os.ModePerm); err != nil {
		return err
	}

	dl := downloader.ChartDownloader{
		Out:     nil,
		Verify:  downloader.VerifyNever,
		Getters: getter.All(cli.New()), // TODO: Probably shouldn't do this...
		Options: []getter.Option{
			getter.WithInsecureSkipVerifyTLS(true),
		},
	}
	_, _, err := dl.DownloadTo(ref, version, p.Chart().Path())
	return err
}
// layout opens the package's OCI layout, creating an empty one on first use.
func (p PkgFs) layout() (layout.Path, error) {
	root := p.Image().Path(".")
	lp, err := layout.FromPath(root)
	if os.IsNotExist(err) {
		return layout.Write(root, empty.Index)
	}
	return lp, err
}
//WriteFile is a helper method to write a file within the PkgFs
// The file is created/truncated with perm; a close error is surfaced only
// when the write itself succeeded.
func (p PkgFs) WriteFile(name string, data []byte, perm os.FileMode) error {
	f, err := p.FS.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	_, werr := f.Write(data)
	cerr := f.Close()
	if werr != nil {
		return werr
	}
	return cerr
}
// MapLayout reads the package's OCI layout and returns a map of image
// reference -> image for every manifest recorded in the layout index.
func (p PkgFs) MapLayout() (map[name.Reference]v1.Image, error) {
	imgRefs := make(map[name.Reference]v1.Image)

	//TODO: Factor this out to a Store interface
	lp, err := p.layout()
	if err != nil {
		return nil, err
	}

	// BUG FIX: ImageIndex/IndexManifest errors were previously discarded,
	// risking a nil dereference on a corrupt layout.
	ii, err := lp.ImageIndex()
	if err != nil {
		return nil, err
	}
	im, err := ii.IndexManifest()
	if err != nil {
		return nil, err
	}

	for _, m := range im.Manifests {
		ref, err := name.ParseReference(m.Annotations[ocispec.AnnotationRefName])
		if err != nil {
			return nil, err
		}
		img, err := lp.Image(m.Digest)
		if err != nil {
			return nil, err
		}
		imgRefs[ref] = img
	}
	return imgRefs, nil
}
// mkdirIfNotExists ensures dir exists inside the package fs.
// FIX: MkdirAll is already a no-op for an existing directory, so the previous
// Stat pre-check was redundant and silently swallowed any non-NotExist stat
// error; delegating directly also answers the original TODO.
func (p PkgFs) mkdirIfNotExists(dir string, perm os.FileMode) error {
	return p.FS.MkdirAll(dir, perm)
}

39
pkg/kube/client.go Normal file
View File

@@ -0,0 +1,39 @@
package kube
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"path/filepath"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// NewKubeConfig builds a rest.Config from the first driver kubeconfig found
// on disk (k3s first, then rke2).
func NewKubeConfig() (*rest.Config, error) {
	rules := &clientcmd.ClientConfigLoadingRules{
		Precedence: []string{
			filepath.Join("/etc/rancher/k3s/k3s.yaml"),
			filepath.Join("/etc/rancher/rke2/rke2.yaml"),
		},
		WarnIfAllMissing: true,
	}
	cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
	return cc.ClientConfig()
}
//NewClient returns a fresh kube client
// NOTE(review): the scheme is empty (no types registered) — confirm callers
// register the types they need before use.
func NewClient() (client.Client, error) {
	cfg, err := NewKubeConfig()
	if err != nil {
		return nil, err
	}
	return client.New(cfg, client.Options{Scheme: runtime.NewScheme()})
}

92
pkg/kube/status.go Normal file
View File

@@ -0,0 +1,92 @@
package kube
import (
"context"
"errors"
"fmt"
"k8s.io/client-go/rest"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/cli-utils/pkg/object"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"strings"
"time"
)
// StatusChecker polls Kubernetes objects via kstatus until they reach a
// desired status: poller drives the polling, interval is the poll period,
// timeout bounds the whole wait.
type StatusChecker struct {
poller *polling.StatusPoller
client client.Client
interval time.Duration
timeout time.Duration
}
// NewStatusChecker builds a StatusChecker polling at interval, giving up
// after timeout.
func NewStatusChecker(kubeConfig *rest.Config, interval time.Duration, timeout time.Duration) (*StatusChecker, error) {
	mapper, err := apiutil.NewDynamicRESTMapper(kubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, err := client.New(kubeConfig, client.Options{Mapper: mapper})
	if err != nil {
		return nil, err
	}
	sc := &StatusChecker{
		poller:   polling.NewStatusPoller(kubeClient, mapper),
		client:   kubeClient,
		interval: interval,
		timeout:  timeout,
	}
	return sc, nil
}
// WaitForCondition polls the given objects until they all reach
// status.CurrentStatus or c.timeout elapses, printing a per-object summary
// once polling stops. Returns an error on collector failure or timeout.
func (c *StatusChecker) WaitForCondition(objs ...object.ObjMetadata) error {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
// Poll emits status events until ctx is cancelled.
eventsChan := c.poller.Poll(ctx, objs, polling.Options{
PollInterval: c.interval,
UseCache: true,
})
coll := collector.NewResourceStatusCollector(objs)
// The observer cancels ctx once every object is Current; done closes when
// the event channel drains after cancellation.
done := coll.ListenWithObserver(eventsChan, desiredStatusNotifierFunc(cancel, status.CurrentStatus))
<-done
for _, rs := range coll.ResourceStatuses {
switch rs.Status {
case status.CurrentStatus:
fmt.Printf("%s: %s ready\n", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind))
case status.NotFoundStatus:
fmt.Println(fmt.Errorf("%s: %s not found", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
default:
fmt.Println(fmt.Errorf("%s: %s not ready", rs.Identifier.Name, strings.ToLower(rs.Identifier.GroupKind.Kind)))
}
}
// DeadlineExceeded here means at least one object never became ready.
if coll.Error != nil || ctx.Err() == context.DeadlineExceeded {
return errors.New("timed out waiting for condition")
}
return nil
}
// desiredStatusNotifierFunc returns an Observer function for the
// ResourceStatusCollector that will cancel the context (using the cancelFunc)
// when all resources have reached the desired status.
func desiredStatusNotifierFunc(cancelFunc context.CancelFunc, desired status.Status) collector.ObserverFunc {
	return func(rsc *collector.ResourceStatusCollector, _ event.Event) {
		statuses := make([]*event.ResourceStatus, 0, len(rsc.ResourceStatuses))
		for _, rs := range rsc.ResourceStatuses {
			statuses = append(statuses, rs)
		}
		if aggregator.AggregateStatus(statuses, desired) == desired {
			cancelFunc()
		}
	}
}

39
pkg/log/log.go Normal file
View File

@@ -0,0 +1,39 @@
package log
import (
"github.com/sirupsen/logrus"
"io"
)
// Logger is hauler's minimal logging contract, satisfied by a logrus-backed
// implementation below.
type Logger interface {
Errorf(string, ...interface{})
Infof(string, ...interface{})
Warnf(string, ...interface{})
Debugf(string, ...interface{})
WithFields(logrus.Fields) *logrus.Entry
}
// standardLogger adapts *logrus.Logger to the Logger interface by embedding.
type standardLogger struct {
*logrus.Logger
}
// Event pairs a numeric id with a printf-style message template.
type Event struct {
id int
message string
}
// Standard event templates used by the helpers below.
var (
invalidArgMessage = Event{1, "Invalid arg: %s"}
)
// NewLogger returns a logrus-backed logger writing to out.
func NewLogger(out io.Writer) *standardLogger {
	base := logrus.New()
	base.SetOutput(out)
	return &standardLogger{Logger: base}
}
// InvalidArg logs a standardized error for an invalid argument value.
func (l *standardLogger) InvalidArg(arg string) {
	msg := invalidArgMessage.message
	l.Errorf(msg, arg)
}

40
pkg/oci/layout.go Normal file
View File

@@ -0,0 +1,40 @@
package oci
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
)
// refNameAnnotation is the standard OCI annotation carrying an image's
// fully-qualified reference name.
const refNameAnnotation = "org.opencontainers.image.ref.name"

// getIndexManifestsDescriptors returns the manifest descriptors from the
// layout's index, or nil if the index cannot be read (errors are deliberately
// collapsed to nil for the list helpers below).
func getIndexManifestsDescriptors(layout layout.Path) []v1.Descriptor {
	idx, err := layout.ImageIndex()
	if err != nil {
		return nil
	}
	manifest, err := idx.IndexManifest()
	if err != nil {
		return nil
	}
	return manifest.Manifests
}
// ListDigests returns the digest of every manifest in the layout's index.
func ListDigests(layout layout.Path) []v1.Hash {
	var digests []v1.Hash
	descriptors := getIndexManifestsDescriptors(layout)
	for _, d := range descriptors {
		digests = append(digests, d.Digest)
	}
	return digests
}
// ListImages maps each annotated image reference in the layout's index to its
// digest; descriptors without a ref-name annotation are skipped.
func ListImages(layout layout.Path) map[string]v1.Hash {
	images := map[string]v1.Hash{}
	for _, desc := range getIndexManifestsDescriptors(layout) {
		ref, ok := desc.Annotations[refNameAnnotation]
		if !ok {
			continue
		}
		images[ref] = desc.Digest
	}
	return images
}

79
pkg/oci/oci.go Normal file
View File

@@ -0,0 +1,79 @@
package oci
import (
"context"
"fmt"
"os"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// haulerMediaType tags artifacts pushed/pulled by hauler via oras.
haulerMediaType = "application/vnd.oci.image"
)
// Get wraps the oras go module to get artifacts from a registry
func Get(ctx context.Context, src string, dst string) error {
	store := content.NewFileStore(dst)
	defer store.Close()

	resolver, err := resolver()
	if err != nil {
		return err
	}

	// Pull file(s) from registry and save to disk
	fmt.Printf("pulling from %s and saving to %s\n", src, dst)
	desc, _, err := oras.Pull(ctx, resolver, src, store, oras.WithAllowedMediaTypes([]string{haulerMediaType}))
	if err != nil {
		return err
	}
	fmt.Printf("pulled from %s with digest %s\n", src, desc.Digest)
	return nil
}
// Put wraps the oras go module to put artifacts into a registry
func Put(ctx context.Context, src string, dst string) error {
	data, err := os.ReadFile(src)
	if err != nil {
		return err
	}

	resolver, err := resolver()
	if err != nil {
		return err
	}

	store := content.NewMemoryStore()
	contents := []ocispec.Descriptor{
		store.Add(src, haulerMediaType, data),
	}

	desc, err := oras.Push(ctx, resolver, dst, store, contents)
	if err != nil {
		return err
	}
	// FIX: trailing newline added for consistency with Get's log lines.
	fmt.Printf("pushed %s to %s with digest: %s\n", src, dst, desc.Digest)
	return nil
}
// resolver returns a plain-HTTP docker registry resolver.
func resolver() (remotes.Resolver, error) {
	return docker.NewResolver(docker.ResolverOptions{PlainHTTP: true}), nil
}

1
pkg/oci/oci_test.go Normal file
View File

@@ -0,0 +1 @@
package oci

48
pkg/packager/archive.go Normal file
View File

@@ -0,0 +1,48 @@
package packager
import (
"fmt"
"os"
"path/filepath"
"github.com/mholt/archiver/v3"
)
// Archiver abstracts creating and extracting package archives.
// String() returns the archive file extension (per archiver's convention).
type Archiver interface {
String() string
Archive([]string, string) error
Unarchive(string, string) error
}
// NewArchiver returns hauler's canonical archiver: tar + zstd, overwriting
// existing files and creating parent directories on extraction.
func NewArchiver() Archiver {
	tarCfg := &archiver.Tar{
		OverwriteExisting:      true,
		MkdirAll:               true,
		ImplicitTopLevelFolder: false,
		StripComponents:        0,
		ContinueOnError:        false,
	}
	return &archiver.TarZstd{Tar: tarCfg}
}
// Package archives the contents of src into "<output>.<ext>" in the caller's
// working directory, restoring the working directory before returning.
func Package(a Archiver, src string, output string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer os.Chdir(cwd)

	if err := os.Chdir(src); err != nil {
		return err
	}
	target := filepath.Join(cwd, fmt.Sprintf("%s.%s", output, a.String()))
	return a.Archive([]string{"."}, target)
}
// Unpackage extracts the archive at src into dest.
func Unpackage(a Archiver, src, dest string) error {
	return a.Unarchive(src, dest)
}

View File

@@ -0,0 +1,175 @@
package images
import (
"bytes"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/helmdeployer"
"github.com/rancher/fleet/pkg/manifest"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/util/jsonpath"
"strings"
)
// Imager is any source that can enumerate container image references.
type Imager interface {
Images() ([]string, error)
}
// discoveredImages is a plain slice of image refs that satisfies Imager.
type discoveredImages []string
func (d discoveredImages) Images() ([]string, error) {
return d, nil
}
//MapImager will gather images from various Imager sources and return a single slice
// (as a merged map of reference -> resolved remote image).
func MapImager(imager ...Imager) (map[name.Reference]v1.Image, error) {
	merged := make(map[name.Reference]v1.Image)
	for _, src := range imager {
		refs, err := src.Images()
		if err != nil {
			return nil, err
		}
		resolved, err := ResolveRemoteRefs(refs...)
		if err != nil {
			return nil, err
		}
		//TODO: Is there a more efficient way to merge?
		for ref, img := range resolved {
			merged[ref] = img
		}
	}
	return merged, nil
}
// ImageMapFromBundle renders a fleet bundle's resources, scans them for image
// references, and resolves each to a remote image.
// FIX: the template-and-scan logic was duplicated verbatim from
// IdentifyImages; delegate so it lives in one place.
func ImageMapFromBundle(b *fleetapi.Bundle) (map[name.Reference]v1.Image, error) {
	di, err := IdentifyImages(b)
	if err != nil {
		return nil, err
	}
	return ResolveRemoteRefs(di...)
}
// IdentifyImages renders the bundle's resources with fleet's helm deployer
// and scans each resulting object for image references.
func IdentifyImages(b *fleetapi.Bundle) (discoveredImages, error) {
	opts := fleetapi.BundleDeploymentOptions{DefaultNamespace: "default"}
	m := &manifest.Manifest{Resources: b.Spec.Resources}

	//TODO: I think this is right?
	objs, err := helmdeployer.Template("anything", m, opts)
	if err != nil {
		return nil, err
	}

	var found discoveredImages
	for _, obj := range objs {
		imgs, err := imageFromRuntimeObject(obj.(*unstructured.Unstructured))
		if err != nil {
			return nil, err
		}
		found = append(found, imgs...)
	}
	return found, nil
}
//ResolveRemoteRefs will return a slice of remote images resolved from their fully qualified name
func ResolveRemoteRefs(images ...string) (map[name.Reference]v1.Image, error) {
	resolved := make(map[name.Reference]v1.Image)
	for _, image := range images {
		// Skip phantom empty entries produced by upstream string splitting.
		if image == "" {
			continue
		}
		//TODO: This will error out if remote is a v1 image, do better error handling for this
		ref, err := name.ParseReference(image)
		if err != nil {
			return nil, err
		}
		img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		if err != nil {
			return nil, err
		}
		resolved[ref] = img
	}
	return resolved, nil
}
//TODO: Add user defined paths
// knownImagePaths are the jsonpath templates scanned for image references in
// rendered objects; missing keys are tolerated at query time.
var knownImagePaths = []string{
// Deployments & DaemonSets
"{.spec.template.spec.initContainers[*].image}",
"{.spec.template.spec.containers[*].image}",
// Pods
"{.spec.initContainers[*].image}",
"{.spec.containers[*].image}",
}
//imageFromRuntimeObject will return any images found in known obj specs
func imageFromRuntimeObject(obj *unstructured.Unstructured) (images []string, err error) {
	// BUG FIX: the MarshalJSON error was previously discarded.
	objData, err := obj.MarshalJSON()
	if err != nil {
		return nil, err
	}

	var data interface{}
	if err := json.Unmarshal(objData, &data); err != nil {
		return nil, err
	}

	j := jsonpath.New("")
	// Tolerate objects that lack some of the known spec paths.
	j.AllowMissingKeys(true)

	for _, path := range knownImagePaths {
		r, err := parseJSONPath(data, j, path)
		if err != nil {
			return nil, err
		}
		images = append(images, r...)
	}
	return images, nil
}
// parseJSONPath executes a jsonpath template against input and returns the
// space-separated results as a slice.
// FIX: strings.Split on an empty result yielded [""], leaking phantom empty
// image names to callers; strings.Fields returns an empty slice instead and
// is otherwise identical for the space-joined results jsonpath emits.
func parseJSONPath(input interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
	buf := new(bytes.Buffer)
	if err := parser.Parse(template); err != nil {
		return nil, err
	}
	if err := parser.Execute(buf, input); err != nil {
		return nil, err
	}
	return strings.Fields(buf.String()), nil
}

View File

@@ -0,0 +1,84 @@
package images
import (
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/util/jsonpath"
"reflect"
"testing"
)
// jsona is the test fixture: a document with a flat image field, a nested
// image field, and a nested list of image objects, exercising each jsonpath
// shape used by the tests below.
var (
jsona = []byte(`{
"flatImage": "name/of/image:with-tag",
"deeply": {
"nested": {
"image": "another/image/name:with-a-tag",
"set": [
{ "image": "first/in/list:123" },
{ "image": "second/in:456" }
]
}
}
}`)
)
// Test_parseJSONPath table-tests parseJSONPath against flat, nested, and
// list-valued jsonpath templates over the jsona fixture.
// NOTE(review): args.name and wantErr are declared but never exercised —
// consider removing or adding an error case.
func Test_parseJSONPath(t *testing.T) {
var data interface{}
if err := json.Unmarshal(jsona, &data); err != nil {
t.Errorf("failed to unmarshal test article, %v", err)
}
// A single parser is reused across cases, matching production usage.
j := jsonpath.New("")
type args struct {
input interface{}
name string
template string
}
tests := []struct {
name string
args args
want []string
wantErr bool
}{
{
name: "should find flat path with string result",
args: args{
input: data,
name: "wut",
template: "{.flatImage}",
},
want: []string{"name/of/image:with-tag"},
},
{
name: "should find nested path with string result",
args: args{
input: data,
name: "wut",
template: "{.deeply.nested.image}",
},
want: []string{"another/image/name:with-a-tag"},
},
{
name: "should find nested path with slice result",
args: args{
input: data,
name: "wut",
template: "{.deeply.nested.set[*].image}",
},
want: []string{"first/in/list:123", "second/in:456"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseJSONPath(tt.args.input, j, tt.args.template)
if (err != nil) != tt.wantErr {
t.Errorf("parseJSONPath() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("parseJSONPath() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -1,561 +1,141 @@
package packager
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"fmt"
"context"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/bundle"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/fs"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"io/ioutil"
"log"
"k8s.io/apimachinery/pkg/util/json"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"path/filepath"
)
const (
distroSep = ":"
distroK3s = "k3s"
// distroRKE2 = "rke2"
)
var (
packageDistros = map[string]bool{
distroK3s: true,
// distroRKE2: true,
}
)
// Config passes all configuration options into a Packager.
type Config struct {
// Destination specifies the writer the package will be outputted to.
Destination io.Writer
// KubernetesVersion specifies the distribution and version of Kubernetes to
// use for the deployed cluster. Must be of format `k3s:1.18.8-rc1+k3s1`;
// currently only supports k3s.
KubernetesVersion string
// HTTPClient is the client to use for all HTTP calls. If nil, will use
// default client from http module. Possible use is to trust additional CAs
// without assuming access to the local filesytem.
HTTPClient *http.Client
// Packager collects cluster dependencies (bundles, driver, fleet) into a
// package filesystem and archives it for air-gapped transport.
type Packager interface {
Bundles(context.Context, ...string) ([]*fleetapi.Bundle, error)
Driver(context.Context, v1alpha1.Drive) error
Fleet(context.Context, v1alpha1.Fleet) error
Archive(Archiver, v1alpha1.Package, string) error
}
func (c Config) Complete() (completedPackageConfig, error) {
splitK8s := strings.Split(c.KubernetesVersion, distroSep)
if len(splitK8s) != 2 {
return completedPackageConfig{}, fmt.Errorf("kubernetes version %s not supported", c.KubernetesVersion)
}
k8sDistro, k8sVersion := splitK8s[0], splitK8s[1]
if !packageDistros[k8sDistro] {
return completedPackageConfig{}, fmt.Errorf("kubernetes distribution %s not supported", k8sDistro)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
res := completedPackageConfig{
dst: c.Destination,
k8sDistro: k8sDistro,
k8sVersion: k8sVersion,
httpClient: httpClient,
}
return res, nil
// pkg is the default Packager implementation, backed by an on-disk
// package filesystem (fs.PkgFs).
type pkg struct {
	fs fs.PkgFs
}
type completedPackageConfig struct {
dst io.Writer
k8sDistro string
k8sVersion string
httpClient *http.Client
// NewPackager loads a new packager rooted at the given path on disk; the
// path becomes the backing package filesystem for all subsequent
// Bundles/Driver/Fleet/Archive operations.
func NewPackager(path string) Packager {
	return pkg{
		fs: fs.NewPkgFS(path),
	}
}
// Packager provides the functionality for collecting and packaging all
// dependencies required to install a Kubernetes cluster and deploy utility
// applications to that cluster.
type Packager struct {
completedPackageConfig
// Archive serializes the package definition to package.json inside the
// package filesystem, then archives the entire package directory to
// output using the provided Archiver.
func (p pkg) Archive(a Archiver, pkg v1alpha1.Package, output string) error {
	data, err := json.Marshal(pkg)
	if err != nil {
		return err
	}
	// 0644: world-readable metadata file at the package root.
	if err = p.fs.WriteFile("package.json", data, 0644); err != nil {
		return err
	}
	return Package(a, p.fs.Path(), output)
}
// New returns a new Packager from the provided config
func New(config Config) (*Packager, error) {
completeConfig, err := config.Complete()
// Bundles opens each provided path as a fleet bundle (compressed), adds it
// to the package filesystem, and returns the collected bundle definitions.
// The bundle name is derived from the final path segment and each bundle is
// placed in the "fleet-local" namespace.
func (p pkg) Bundles(ctx context.Context, path ...string) ([]*fleetapi.Bundle, error) {
	opts := &bundle.Options{Compress: true}
	var bundles []*fleetapi.Bundle
	for _, pth := range path {
		bundleName := filepath.Base(pth)
		fb, err := bundle.Open(ctx, bundleName, pth, "", opts)
		if err != nil {
			return nil, err
		}
		//TODO: Figure out why bundle.Open doesn't return with GVK
		// Rewrap the definition so the stored bundle carries its metadata.
		bn := fleetapi.NewBundle("fleet-local", bundleName, *fb.Definition)
		if err := p.fs.AddBundle(bn); err != nil {
			return nil, err
		}
		bundles = append(bundles, bn)
	}
	return bundles, nil
}
// Driver downloads the driver binary and its install script into the
// package filesystem, then adds every image the driver depends on.
func (p pkg) Driver(ctx context.Context, d v1alpha1.Drive) error {
	if err := writeURL(p.fs, d.BinURL(), "k3s"); err != nil {
		return err
	}
	//TODO: Stop hardcoding
	// NOTE(review): install-script URL and the "k3s"/"k3s-init.sh" names are
	// hardcoded; other drivers would need these parameterized.
	if err := writeURL(p.fs, "https://get.k3s.io", "k3s-init.sh"); err != nil {
		return err
	}
	imgMap, err := images.MapImager(d)
	if err != nil {
		return err
	}
	for ref, im := range imgMap {
		err := p.fs.AddImage(ref, im)
		if err != nil {
			return err
		}
	}
	return nil
}
//TODO: Add this to Driver?
// Fleet adds the fleet images plus the fleet CRD chart and the fleet chart
// (both pinned to fl.Version) to the package filesystem.
func (p pkg) Fleet(ctx context.Context, fl v1alpha1.Fleet) error {
	imgMap, err := images.MapImager(fl)
	if err != nil {
		return err
	}
	for ref, im := range imgMap {
		err := p.fs.AddImage(ref, im)
		if err != nil {
			return err
		}
	}
	if err := p.fs.AddChart(fl.CRDChart(), fl.Version); err != nil {
		return err
	}
	if err := p.fs.AddChart(fl.Chart(), fl.Version); err != nil {
		return err
	}
	return nil
}
// writeURL fetches rawURL and stores the response body as a binary named
// name inside the package filesystem. The body is always closed here.
func writeURL(fsys fs.PkgFs, rawURL string, name string) error {
	rc, err := fetchURL(rawURL)
	if err != nil {
		return err
	}
	defer rc.Close()
	return fsys.AddBin(rc, name)
}
func fetchURL(rawURL string) (io.ReadCloser, error) {
resp, err := http.Get(rawURL)
if err != nil {
return nil, err
}
res := &Packager{
completedPackageConfig: completeConfig,
if resp.StatusCode != 200 {
return nil, err
}
return res, nil
}
// Run streams all packaged artifacts for the configured Kubernetes
// distribution as a gzip-compressed tarball to the configured destination.
// Writers are closed in LIFO order so the tar stream is flushed before the
// gzip trailer is written.
func (p *Packager) Run() error {
	var err error
	gzipWriter := gzip.NewWriter(p.dst)
	defer gzipWriter.Close()
	tarWriter := tar.NewWriter(gzipWriter)
	defer tarWriter.Close()
	switch p.k8sDistro {
	case distroK3s:
		err = packageK3sArtifacts(p, tarWriter, p.k8sVersion)
	// case distroRKE2:
	// err = packageRKE2Artifacts(tarWriter, p.k8sVersion)
	}
	if err != nil {
		return fmt.Errorf("package %s artifacts: %v", p.k8sDistro, err)
	}
	return nil
}
// Artifact represents a source artifact with an optional name to prevent
// conflicts. Source must point to a local file or an http/https endpoint.
type Artifact struct {
	Source string
	Name   string
}

// GetName returns the explicitly set name of the artifact or the base path
// segment as a default.
func (a Artifact) GetName() string {
	if a.Name != "" {
		return a.Name
	}
	return path.Base(a.Source)
}
// ArtifactGroup defines a set of collected artifacts in a packaged directory.
type ArtifactGroup struct {
	// PackagePath is the directory inside the tarball that holds this group.
	PackagePath string
	// Binaries are copied verbatim under bin/.
	Binaries []Artifact
	// ImageArchives are gzip-compressed under image-archives/.
	ImageArchives []Artifact
	// ImageLists are pulled and written as compressed tarballs under image-lists/.
	ImageLists []Artifact
	// TODO - validate SHA256 hashes
	// SHA256Sums []string
}
// Base directory names for each artifact category inside a package path.
const (
	binBaseDirName           = "bin"
	imageArchivesBaseDirName = "image-archives"
	imageListsBaseDirName    = "image-lists"
)
// Package downloads or reads all files in the ArtifactGroup and writes
// them to dst according to the expected file structure. Package does not
// handle creating any parent directory Headers in dst; this must be handled
// outside of this function.
//
// Binaries are stored verbatim (mode 0755), image archives are
// gzip-compressed (suffix .gz), and image lists are resolved into
// compressed image tarballs (suffix .tar.gz). Fixes the misspelled
// "iamge" error messages in the archive and list copy paths.
func (p *Packager) Package(dst *tar.Writer, g ArtifactGroup) error {
	packageTime := time.Now()
	// TODO - download/read Binaries, ImageArchives, ImageLists in parallel
	// collect and package all binaries
	if len(g.Binaries) != 0 {
		binFolderHeader := &tar.Header{
			Typeflag: tar.TypeDir,
			Name:     path.Join(g.PackagePath, binBaseDirName),
			ModTime:  packageTime,
			Mode:     0755,
		}
		if err := dst.WriteHeader(binFolderHeader); err != nil {
			return fmt.Errorf("add tar header for binary directory %q: %v", binFolderHeader.Name, err)
		}
		for _, binArtifact := range g.Binaries {
			binName := binArtifact.GetName()
			// Buffer the artifact first: tar headers need the exact size.
			buf := &bytes.Buffer{}
			if err := p.PackageBinaryArtifact(buf, binArtifact); err != nil {
				return fmt.Errorf("package binary %q: %v", binName, err)
			}
			binHeader := &tar.Header{
				Typeflag: tar.TypeReg,
				Name:     path.Join(g.PackagePath, binBaseDirName, binName),
				Size:     int64(buf.Len()),
				ModTime:  packageTime,
				Mode:     0755,
			}
			if err := dst.WriteHeader(binHeader); err != nil {
				return fmt.Errorf("add tar header for binary %q: %v", binHeader.Name, err)
			}
			if _, err := io.Copy(dst, buf); err != nil {
				return fmt.Errorf("add binary %q: %v", binHeader.Name, err)
			}
		}
	}
	// collect and package all image archives
	if len(g.ImageArchives) != 0 {
		imageArchivesFolderHeader := &tar.Header{
			Typeflag: tar.TypeDir,
			Name:     path.Join(g.PackagePath, imageArchivesBaseDirName),
			ModTime:  packageTime,
			Mode:     0755,
		}
		if err := dst.WriteHeader(imageArchivesFolderHeader); err != nil {
			return fmt.Errorf("add tar header for image archive directory %q: %v", imageArchivesFolderHeader.Name, err)
		}
		for _, imageArchiveArtifact := range g.ImageArchives {
			imageArchiveName := imageArchiveArtifact.GetName()
			buf := &bytes.Buffer{}
			if err := p.PackageImageArchiveArtifact(buf, imageArchiveArtifact); err != nil {
				return fmt.Errorf("package image archive %q: %v", imageArchiveName, err)
			}
			imageArchiveHeader := &tar.Header{
				Typeflag: tar.TypeReg,
				Name:     path.Join(g.PackagePath, imageArchivesBaseDirName, imageArchiveName+".gz"),
				Size:     int64(buf.Len()),
				ModTime:  packageTime,
				Mode:     0644,
			}
			if err := dst.WriteHeader(imageArchiveHeader); err != nil {
				return fmt.Errorf("add tar header for image archive %q: %v", imageArchiveName, err)
			}
			if _, err := io.Copy(dst, buf); err != nil {
				return fmt.Errorf("add image archive %q: %v", imageArchiveHeader.Name, err)
			}
		}
	}
	// collect and package all image lists
	if len(g.ImageLists) != 0 {
		imageListsFolderHeader := &tar.Header{
			Typeflag: tar.TypeDir,
			Name:     path.Join(g.PackagePath, imageListsBaseDirName),
			ModTime:  packageTime,
			Mode:     0755,
		}
		if err := dst.WriteHeader(imageListsFolderHeader); err != nil {
			return fmt.Errorf("add tar header for image list directory %q: %v", imageListsFolderHeader.Name, err)
		}
		for _, imageListArtifact := range g.ImageLists {
			imageListName := imageListArtifact.GetName()
			buf := &bytes.Buffer{}
			if err := p.PackageImageListArtifact(buf, imageListArtifact); err != nil {
				return fmt.Errorf("package image list %q: %v", imageListName, err)
			}
			imageListHeader := &tar.Header{
				Typeflag: tar.TypeReg,
				Name:     path.Join(g.PackagePath, imageListsBaseDirName, imageListName+".tar.gz"),
				Size:     int64(buf.Len()),
				ModTime:  packageTime,
				Mode:     0644,
			}
			if err := dst.WriteHeader(imageListHeader); err != nil {
				return fmt.Errorf("add tar header for image list %q: %v", imageListName, err)
			}
			if _, err := io.Copy(dst, buf); err != nil {
				return fmt.Errorf("add image list %q: %v", imageListHeader.Name, err)
			}
		}
	}
	return nil
}
// PackageBinaryArtifact writes the contents of the specified binary to dst,
// fetching the binary from artifact.Source. A URL with a host is fetched
// over http/https via the configured client; otherwise Source is read as a
// local file path.
func (p *Packager) PackageBinaryArtifact(dst io.Writer, artifact Artifact) error {
	srcURL, err := url.Parse(artifact.Source)
	if err != nil {
		return fmt.Errorf("parse binary source %q: %v", artifact.Source, err)
	}
	var binSrc io.Reader
	if srcURL.Host != "" {
		switch srcURL.Scheme {
		case "http", "https":
			resp, err := p.httpClient.Get(srcURL.String())
			if err != nil {
				return fmt.Errorf("get binary from URL %q: %v", srcURL, err)
			}
			defer resp.Body.Close()
			// TODO - confirm good HTTP status codes
			if resp.StatusCode != 200 {
				// Include the response body in the error when it can be read.
				b := &strings.Builder{}
				if _, cpErr := io.Copy(b, resp.Body); cpErr != nil {
					// could not get response body, return simple error
					return fmt.Errorf(
						"get binary from URL %q: bad response: status %s",
						srcURL, resp.Status,
					)
				}
				return fmt.Errorf(
					"get binary from URL %q: bad response: status %s, body %s",
					srcURL, resp.Status, b.String(),
				)
			}
			binSrc = resp.Body
		default:
			return fmt.Errorf("unsupported URL scheme %q", srcURL.Scheme)
		}
	} else {
		// TODO - use more efficient but lower-level os.Open to minimize RAM use
		binBuf, err := ioutil.ReadFile(srcURL.String())
		if err != nil {
			return fmt.Errorf("read binary file %q: %v", srcURL.String(), err)
		}
		binSrc = bytes.NewBuffer(binBuf)
	}
	// package binary by performing a simple copy
	if _, err := io.Copy(dst, binSrc); err != nil {
		return fmt.Errorf("write binary %q: %v", artifact.GetName(), err)
	}
	return nil
}
// PackageImageArchiveArtifact writes the gzip-compressed contents of the
// specified image archive to dst, fetching the archive from artifact.Source.
// Source resolution mirrors PackageBinaryArtifact (URL with host -> HTTP,
// otherwise local file); only the output differs by being gzipped.
func (p *Packager) PackageImageArchiveArtifact(dst io.Writer, artifact Artifact) error {
	srcURL, err := url.Parse(artifact.Source)
	if err != nil {
		return fmt.Errorf("parse image archive source %q: %v", artifact.Source, err)
	}
	var archiveSrc io.Reader
	if srcURL.Host != "" {
		switch srcURL.Scheme {
		case "http", "https":
			resp, err := p.httpClient.Get(srcURL.String())
			if err != nil {
				return fmt.Errorf("get image archive from URL %q: %v", srcURL, err)
			}
			defer resp.Body.Close()
			// TODO - confirm good HTTP status codes
			if resp.StatusCode != 200 {
				b := &strings.Builder{}
				if _, cpErr := io.Copy(b, resp.Body); cpErr != nil {
					// could not get response body, return simple error
					return fmt.Errorf(
						"get image archive from URL %q: bad response: status %s",
						srcURL, resp.Status,
					)
				}
				return fmt.Errorf(
					"get image archive from URL %q: bad response: status %s, body %s",
					srcURL, resp.Status, b.String(),
				)
			}
			archiveSrc = resp.Body
		default:
			return fmt.Errorf("unsupported URL scheme %q", srcURL.Scheme)
		}
	} else {
		// TODO - use more efficient but lower-level os.Open to minimize RAM use
		archiveBuf, err := ioutil.ReadFile(srcURL.String())
		if err != nil {
			return fmt.Errorf("read image archive file %q: %v", srcURL.String(), err)
		}
		archiveSrc = bytes.NewBuffer(archiveBuf)
	}
	// package image archive by performing a copy to a gzip writer
	gzipDst := gzip.NewWriter(dst)
	defer gzipDst.Close()
	if _, err := io.Copy(gzipDst, archiveSrc); err != nil {
		return fmt.Errorf("write image archive %q: %v", artifact.GetName(), err)
	}
	return nil
}
// PackageImageListArtifact fetches the contents of an image list from
// artifact.Source and calls ArchiveImageList to write the resulting images
// to dst. Source resolution mirrors PackageBinaryArtifact (URL with
// host -> HTTP, otherwise local file).
// NOTE(review): the doc mentions ArchiveImageList but the code calls
// PackageImageList — the comment and implementation should be reconciled.
func (p *Packager) PackageImageListArtifact(dst io.Writer, artifact Artifact) error {
	srcURL, err := url.Parse(artifact.Source)
	if err != nil {
		return fmt.Errorf("parse image list source %q: %v", artifact.Source, err)
	}
	var listSrc io.Reader
	if srcURL.Host != "" {
		switch srcURL.Scheme {
		case "http", "https":
			resp, err := p.httpClient.Get(srcURL.String())
			if err != nil {
				return fmt.Errorf("get image list from URL %q: %v", srcURL, err)
			}
			defer resp.Body.Close()
			// TODO - confirm good HTTP status codes
			if resp.StatusCode != 200 {
				b := &strings.Builder{}
				if _, cpErr := io.Copy(b, resp.Body); cpErr != nil {
					// could not get response body, return simple error
					return fmt.Errorf(
						"get image list from URL %q: bad response: status %s",
						srcURL, resp.Status,
					)
				}
				return fmt.Errorf(
					"get image list from URL %q: bad response: status %s, body %s",
					srcURL, resp.Status, b.String(),
				)
			}
			listSrc = resp.Body
		default:
			return fmt.Errorf("unsupported URL scheme %q", srcURL.Scheme)
		}
	} else {
		// TODO - use more efficient but lower-level os.Open to minimize RAM use
		listBuf, err := ioutil.ReadFile(srcURL.String())
		if err != nil {
			return fmt.Errorf("read image list file %q: %v", srcURL.String(), err)
		}
		listSrc = bytes.NewBuffer(listBuf)
	}
	if err := p.PackageImageList(dst, listSrc); err != nil {
		return fmt.Errorf("archive images: %v", err)
	}
	return nil
}
// PackageImageList reads a list of newline-delimited images from src, pulls all
// images, and writes the gzip-compressed tarball to dst.
func (p *Packager) PackageImageList(dst io.Writer, src io.Reader) error {
	scanner := bufio.NewScanner(src)
	refToImage := map[name.Reference]v1.Image{}
	for scanner.Scan() {
		src := scanner.Text()
		ref, err := name.ParseReference(src)
		if err != nil {
			return fmt.Errorf("bad image reference %s: %v", src, err)
		}
		// Pull through the configured client's transport so its settings
		// also apply to registry traffic.
		transport := p.httpClient.Transport
		if transport == nil {
			transport = http.DefaultTransport
		}
		img, err := crane.Pull(src, crane.WithTransport(transport))
		if err != nil {
			return fmt.Errorf("pull image %s: %v", src, err)
		}
		refToImage[ref] = img
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("read image list: %v", err)
	}
	gzipDst := gzip.NewWriter(dst)
	defer gzipDst.Close()
	if err := tarball.MultiRefWrite(refToImage, gzipDst); err != nil {
		return fmt.Errorf("write images: %v", err)
	}
	return nil
}
// URL templates for fetching k3s release assets and raw repository files;
// the two %s slots are (version, filename).
const (
	k3sDownloadFmtStr = `https://github.com/rancher/k3s/releases/download/%s/%s`
	k3sRawFmtStr      = `https://raw.githubusercontent.com/rancher/k3s/%s/%s`
)
// packageK3sArtifacts collects the k3s release assets for the given version
// (binary, install script, airgap image archive, and image list) and writes
// them under kubernetes/k3s in dst.
func packageK3sArtifacts(p *Packager, dst *tar.Writer, version string) error {
	binaries := []Artifact{
		{Source: fmt.Sprintf(k3sDownloadFmtStr, url.QueryEscape(version), "k3s")},
		{Source: fmt.Sprintf(k3sRawFmtStr, url.QueryEscape(version), "install.sh")},
	}
	imageArchives := []Artifact{
		{Source: fmt.Sprintf(k3sDownloadFmtStr, url.QueryEscape(version), "k3s-airgap-images-amd64.tar")},
	}
	imageLists := []Artifact{
		{Source: fmt.Sprintf(k3sDownloadFmtStr, url.QueryEscape(version), "k3s-images.txt")},
	}
	k3sArtifactGroup := ArtifactGroup{
		PackagePath:   "kubernetes/k3s",
		Binaries:      binaries,
		ImageArchives: imageArchives,
		ImageLists:    imageLists,
	}
	// Create the directory chain first so extraction tools see parents.
	if err := TarMkdirP(dst, k3sArtifactGroup.PackagePath); err != nil {
		return fmt.Errorf("make k3s base directory: %v", err)
	}
	if err := p.Package(dst, k3sArtifactGroup); err != nil {
		return fmt.Errorf("collect k3s artifacts: %v", err)
	}
	log.Println("packageK3sArtifacts done")
	return nil
}
// TarMkdirP creates the full chain of Header entries in dst based on the
// provided nested directory string path.
func TarMkdirP(dst *tar.Writer, path string) error {
mkdirpTime := time.Now()
path = strings.TrimSuffix(path, "/")
paths := strings.Split(path, "/")
for i := range paths {
dirName := strings.Join(paths[:i+1], "/") + "/"
dirHeader := &tar.Header{
Typeflag: tar.TypeDir,
Name: dirName,
ModTime: mkdirpTime,
Mode: 0755,
}
if err := dst.WriteHeader(dirHeader); err != nil {
return fmt.Errorf("write directory %q: %v", dirName, err)
}
}
return nil
}
// CopyGzip is a tiny wrapper to write the gzip-compressed contents of src to dst
func CopyGzip(dst io.Writer, src io.Reader) error {
gzipDst := gzip.NewWriter(dst)
defer gzipDst.Close()
_, err := io.Copy(gzipDst, src)
return err
return resp.Body, nil
}

92
pkg/util/files.go Normal file
View File

@@ -0,0 +1,92 @@
package util
import (
"bufio"
"fmt"
"github.com/mholt/archiver/v3"
"io"
"os"
"path/filepath"
)
// dir is a single directory entry in a layout: a root-relative path and
// the permission bits applied when it is created.
type dir struct {
	Path       string
	Permission os.FileMode
}

// FSLayout is a directory layout rooted at Root; dirs lists the
// directories to create beneath it.
type FSLayout struct {
	Root string
	dirs []dir
}
// Layout describes a directory tree rooted at a single path that can be
// created, extended, archived, and removed.
//
// Fixes the interface so *FSLayout actually satisfies it: the original
// declared AddDir() with no parameters and Archive(archiver.Archiver),
// which matched none of FSLayout's method signatures.
type Layout interface {
	// Create materializes all registered directories under the root.
	Create() error
	// AddDir registers a root-relative directory with the given permissions.
	AddDir(relPath string, perm os.FileMode)
	// Archive writes the layout's contents into a named archive file.
	Archive(a *archiver.TarZstd, name string) error
	// Remove deletes the layout root and everything beneath it.
	Remove() error
}
// NewLayout returns an empty layout rooted at root, resolved to an
// absolute path.
// NOTE(review): the error from filepath.Abs is discarded; on failure Root
// is left empty — confirm callers tolerate that.
func NewLayout(root string) *FSLayout {
	absRoot, _ := filepath.Abs(root)
	return &FSLayout{
		Root: absRoot,
		dirs: nil,
	}
}
// Create will create the FSLayout at the FSLayout.Root, making every
// registered directory (and any missing parents) with its recorded
// permission bits. Stops at the first failure.
func (l FSLayout) Create() error {
	for _, d := range l.dirs {
		if err := os.MkdirAll(filepath.Join(l.Root, d.Path), d.Permission); err != nil {
			return err
		}
	}
	return nil
}
// AddDir will add a folder to the FSLayout, recording its root-relative
// path and the permissions to apply when the layout is created.
func (l *FSLayout) AddDir(relPath string, perm os.FileMode) {
	entry := dir{Path: relPath, Permission: perm}
	l.dirs = append(l.dirs, entry)
}
// Remove deletes the layout root and everything beneath it.
func (l FSLayout) Remove() error {
	return os.RemoveAll(l.Root)
}
// Archive writes the layout's contents into "<cwd>/<name>.<ext>", where
// ext comes from a.String(). It temporarily chdirs into the layout root so
// the archive contains root-relative paths.
// NOTE(review): the deferred os.Chdir(cwd) error is ignored — if restoring
// the working directory fails, the process silently keeps running in
// l.Root. Changing the process-wide cwd is also not goroutine-safe.
func (l FSLayout) Archive(a *archiver.TarZstd, name string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	err = os.Chdir(l.Root)
	if err != nil {
		return err
	}
	defer os.Chdir(cwd)
	archiveFile := filepath.Join(cwd, fmt.Sprintf("%s.%s", name, a.String()))
	if err := a.Archive([]string{"."}, archiveFile); err != nil {
		return err
	}
	return nil
}
func LinesToSlice(r io.ReadCloser) ([]string, error) {
var lines []string
scanner := bufio.NewScanner(r)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}

View File

@@ -0,0 +1,15 @@
version: 2.1
jobs:
  # Lint the Helm chart using a containerized helm binary; fails the build
  # on any lint error (--strict).
  lint:
    docker:
      - image: twuni/helm:3.4.1
    steps:
      - checkout
      - run:
          command: helm lint --strict
          name: lint
workflows:
  version: 2
  default:
    jobs:
      - lint

21
testdata/docker-registry/.helmignore vendored Normal file
View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

13
testdata/docker-registry/Chart.yaml vendored Normal file
View File

@@ -0,0 +1,13 @@
apiVersion: v1
appVersion: 2.7.1
description: A Helm chart for Docker Registry
home: https://hub.docker.com/_/registry/
icon: https://hub.docker.com/public/images/logos/mini-logo.svg
maintainers:
- email: devin@canterberry.cc
name: Devin Canterberry
url: https://canterberry.cc/
name: docker-registry
sources:
- https://github.com/docker/distribution-library-image
version: 1.10.1

202
testdata/docker-registry/LICENSE vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

94
testdata/docker-registry/README.md vendored Normal file
View File

@@ -0,0 +1,94 @@
# Docker Registry Helm Chart
This directory contains a Kubernetes chart to deploy a private Docker Registry.
## Prerequisites Details
* PV support on underlying infrastructure (if persistence is required)
## Chart Details
This chart will do the following:
* Implement a Docker registry deployment
## Installing the Chart
First, add the repo:
```console
$ helm repo add twuni https://helm.twun.io
```
To install the chart, use the following:
```console
$ helm install twuni/docker-registry
```
## Configuration
The following table lists the configurable parameters of the docker-registry chart and
their default values.
| Parameter | Description | Default |
|:----------------------------|:-------------------------------------------------------------------------------------------|:----------------|
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.repository` | Container image to use | `registry` |
| `image.tag` | Container image tag to deploy | `2.7.1` |
| `imagePullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` |
| `persistence.enabled` | Whether to use a PVC for the Docker storage | `false` |
| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest | `nil` |
| `persistence.size` | Amount of space to claim for PVC | `10Gi` |
| `persistence.storageClass` | Storage Class to use for PVC | `-` |
| `persistence.existingClaim` | Name of an existing PVC to use for config | `nil` |
| `service.port` | TCP port on which the service is exposed | `5000` |
| `service.type` | service type | `ClusterIP` |
| `service.clusterIP` | if `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service | `nil` |
| `service.nodePort` | if `service.type` is `NodePort` and this is non-empty, sets the node port of the service | `nil` |
| `service.loadBalancerIP` | if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerIP of the service | `nil` |
| `service.loadBalancerSourceRanges`| if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerSourceRanges of the service | `nil` |
| `service.sessionAffinity` | service session affinity | `nil` |
| `service.sessionAffinityConfig` | service session affinity config | `nil` |
| `replicaCount` | k8s replicas | `1` |
| `updateStrategy` | update strategy for deployment | `{}` |
| `podAnnotations` | Annotations for pod | `{}` |
| `podLabels` | Labels for pod | `{}` |
| `podDisruptionBudget` | Pod disruption budget | `{}` |
| `resources.limits.cpu` | Container requested CPU | `nil` |
| `resources.limits.memory` | Container requested memory | `nil` |
| `priorityClassName ` | priorityClassName | `""` |
| `storage` | Storage system to use | `filesystem` |
| `tlsSecretName` | Name of secret for TLS certs | `nil` |
| `secrets.htpasswd` | Htpasswd authentication | `nil` |
| `secrets.s3.accessKey` | Access Key for S3 configuration | `nil` |
| `secrets.s3.secretKey` | Secret Key for S3 configuration | `nil` |
| `secrets.swift.username` | Username for Swift configuration | `nil` |
| `secrets.swift.password` | Password for Swift configuration | `nil` |
| `haSharedSecret` | Shared secret for Registry | `nil` |
| `configData` | Configuration hash for docker | `nil` |
| `s3.region` | S3 region | `nil` |
| `s3.regionEndpoint` | S3 region endpoint | `nil` |
| `s3.bucket` | S3 bucket name | `nil` |
| `s3.encrypt` | Store images in encrypted format | `nil` |
| `s3.secure` | Use HTTPS | `nil` |
| `swift.authurl` | Swift authurl | `nil` |
| `swift.container` | Swift container | `nil` |
| `nodeSelector` | node labels for pod assignment | `{}` |
| `affinity` | affinity settings | `{}` |
| `tolerations` | pod tolerations | `[]` |
| `ingress.enabled` | If true, Ingress will be created | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Ingress labels | `{}` |
| `ingress.path` | Ingress service path | `/` |
| `ingress.hosts` | Ingress hostnames | `[]` |
| `ingress.tls` | Ingress TLS configuration (YAML) | `[]` |
| `extraVolumeMounts` | Additional volumeMounts to the registry container | `[]` |
| `extraVolumes` | Additional volumes to the pod | `[]` |
Specify each parameter using the `--set key=value[,key=value]` argument to
`helm install`.
To generate htpasswd file, run this docker command:
`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`.

View File

@@ -0,0 +1,19 @@
{{- /*
NOTES.txt — usage instructions printed by Helm after install.
Shows how to reach the registry depending on exposure mode
(Ingress, NodePort, LoadBalancer, or ClusterIP via port-forward).
*/}}
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "docker-registry.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of it by running 'kubectl get svc -w {{ template "docker-registry.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "docker-registry.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  {{- /* values.yaml defines service.port (not externalPort); the old reference rendered an empty string */}}
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "docker-registry.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8080:5000
{{- end }}

View File

@@ -0,0 +1,24 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set; truncated to 63 chars for DNS-label limits.
*/}}
{{- define "docker-registry.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "docker-registry.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{/* If the release name already contains the chart name, don't repeat it. */}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,12 @@
# ConfigMap holding the registry daemon's config.yml, rendered verbatim
# from .Values.configData and mounted into the pod at /etc/docker/registry.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "docker-registry.fullname" . }}-config
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
data:
  config.yml: |-
{{ toYaml .Values.configData | indent 4 }}

View File

@@ -0,0 +1,221 @@
# Deployment for the docker-registry pod.
# Storage backend (filesystem / azure / s3 / swift) is selected via
# .Values.storage; credentials come from the chart's companion Secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ template "docker-registry.name" . }}
      release: {{ .Release.Name }}
  replicas: {{ .Values.replicaCount }}
{{- if .Values.updateStrategy }}
  strategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
{{- end }}
  minReadySeconds: 5
  template:
    metadata:
      labels:
        app: {{ template "docker-registry.name" . }}
        release: {{ .Release.Name }}
        {{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
        {{- end }}
      annotations:
        # Forces a rolling restart whenever the rendered config changes.
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- if $.Values.podAnnotations }}
{{ toYaml $.Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: "{{ .Values.priorityClassName }}"
      {{- end }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - /bin/registry
            - serve
            - /etc/docker/registry/config.yml
          ports:
            - containerPort: 5000
          livenessProbe:
            httpGet:
              {{- if .Values.tlsSecretName }}
              scheme: HTTPS
              {{- end }}
              path: /
              port: 5000
          readinessProbe:
            httpGet:
              {{- if .Values.tlsSecretName }}
              scheme: HTTPS
              {{- end }}
              path: /
              port: 5000
          resources:
{{ toYaml .Values.resources | indent 12 }}
          env:
            {{- if .Values.secrets.htpasswd }}
            - name: REGISTRY_AUTH
              value: "htpasswd"
            - name: REGISTRY_AUTH_HTPASSWD_REALM
              value: "Registry Realm"
            - name: REGISTRY_AUTH_HTPASSWD_PATH
              value: "/auth/htpasswd"
            {{- end }}
            - name: REGISTRY_HTTP_SECRET
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: haSharedSecret
            {{- if .Values.tlsSecretName }}
            - name: REGISTRY_HTTP_TLS_CERTIFICATE
              value: /etc/ssl/docker/tls.crt
            - name: REGISTRY_HTTP_TLS_KEY
              value: /etc/ssl/docker/tls.key
            {{- end }}
            {{- if eq .Values.storage "filesystem" }}
            - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
              value: "/var/lib/registry"
            {{- else if eq .Values.storage "azure" }}
            - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureAccountName
            - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureAccountKey
            - name: REGISTRY_STORAGE_AZURE_CONTAINER
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: azureContainer
            {{- else if eq .Values.storage "s3" }}
            {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}
            - name: REGISTRY_STORAGE_S3_ACCESSKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: s3AccessKey
            - name: REGISTRY_STORAGE_S3_SECRETKEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: s3SecretKey
            {{- end }}
            - name: REGISTRY_STORAGE_S3_REGION
              value: {{ required ".Values.s3.region is required" .Values.s3.region }}
            {{- if .Values.s3.regionEndpoint }}
            - name: REGISTRY_STORAGE_S3_REGIONENDPOINT
              value: {{ .Values.s3.regionEndpoint }}
            {{- end }}
            - name: REGISTRY_STORAGE_S3_BUCKET
              value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket }}
            {{- if .Values.s3.encrypt }}
            - name: REGISTRY_STORAGE_S3_ENCRYPT
              value: {{ .Values.s3.encrypt | quote }}
            {{- end }}
            {{- if .Values.s3.secure }}
            - name: REGISTRY_STORAGE_S3_SECURE
              value: {{ .Values.s3.secure | quote }}
            {{- end }}
            {{- else if eq .Values.storage "swift" }}
            - name: REGISTRY_STORAGE_SWIFT_AUTHURL
              value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl }}
            - name: REGISTRY_STORAGE_SWIFT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: swiftUsername
            - name: REGISTRY_STORAGE_SWIFT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ template "docker-registry.fullname" . }}-secret
                  key: swiftPassword
            - name: REGISTRY_STORAGE_SWIFT_CONTAINER
              value: {{ required ".Values.swift.container is required" .Values.swift.container }}
            {{- end }}
            {{- if .Values.persistence.deleteEnabled }}
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
            {{- end }}
          volumeMounts:
            {{- if .Values.secrets.htpasswd }}
            - name: auth
              mountPath: /auth
              readOnly: true
            {{- end }}
            {{- if eq .Values.storage "filesystem" }}
            - name: data
              mountPath: /var/lib/registry/
            {{- end }}
            - name: "{{ template "docker-registry.fullname" . }}-config"
              mountPath: "/etc/docker/registry"
            {{- if .Values.tlsSecretName }}
            - mountPath: /etc/ssl/docker
              name: tls-cert
              readOnly: true
            {{- end }}
            {{- with .Values.extraVolumeMounts }}
              {{- toYaml . | nindent 12 }}
            {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      volumes:
        {{- if .Values.secrets.htpasswd }}
        - name: auth
          secret:
            secretName: {{ template "docker-registry.fullname" . }}-secret
            items:
              - key: htpasswd
                path: htpasswd
        {{- end }}
        {{- if eq .Values.storage "filesystem" }}
        - name: data
          {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end -}}
        {{- end }}
        - name: {{ template "docker-registry.fullname" . }}-config
          configMap:
            name: {{ template "docker-registry.fullname" . }}-config
        {{- if .Values.tlsSecretName }}
        - name: tls-cert
          secret:
            secretName: {{ .Values.tlsSecretName }}
        {{- end }}
        {{- with .Values.extraVolumes }}
          {{- toYaml . | nindent 8 }}
        {{- end }}

View File

@@ -0,0 +1,36 @@
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "docker-registry.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $path := .Values.ingress.path -}}
{{- /* Both API versions chosen below use the v1beta1 serviceName/servicePort
       backend shape; networking.k8s.io/v1 would need a different backend. */}}
apiVersion: {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }}
kind: Ingress
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
  annotations:
    {{- range $key, $value := .Values.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
spec:
  rules:
    {{- range $host := .Values.ingress.hosts }}
    - host: {{ $host }}
      http:
        paths:
          - path: {{ $path }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
    {{- end -}}
  {{- if .Values.ingress.tls }}
  tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
  {{- end -}}
{{- end -}}

View File

@@ -0,0 +1,17 @@
{{- if .Values.podDisruptionBudget -}}
# PodDisruptionBudget rendered only when .Values.podDisruptionBudget is
# non-empty; its contents (minAvailable / maxUnavailable) are spliced into spec.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ template "docker-registry.name" . }}
      release: {{ .Release.Name }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}

View File

@@ -0,0 +1,26 @@
{{- if .Values.persistence.enabled }}
{{- if not .Values.persistence.existingClaim -}}
# PVC for filesystem storage; skipped when an existing claim is supplied.
# NOTE(review): the app label uses fullname here while sibling templates use
# docker-registry.name — presumably intentional upstream; confirm before unifying.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,37 @@
# Secret holding auth and storage-backend credentials, all base64-encoded.
# NOTE: when haSharedSecret is unset, randAlphaNum regenerates it on every
# `helm upgrade`, which restarts the pods.
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "docker-registry.fullname" . }}-secret
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
type: Opaque
data:
  {{- if .Values.secrets.htpasswd }}
  {{- /* quoted like every other entry: b64enc output may contain '+' '/' '=' */}}
  htpasswd: {{ .Values.secrets.htpasswd | b64enc | quote }}
  {{- end }}
  {{- if .Values.secrets.haSharedSecret }}
  haSharedSecret: {{ .Values.secrets.haSharedSecret | b64enc | quote }}
  {{- else }}
  haSharedSecret: {{ randAlphaNum 16 | b64enc | quote }}
  {{- end }}
  {{- if eq .Values.storage "azure" }}
  {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }}
  azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }}
  azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }}
  azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }}
  {{- end }}
  {{- else if eq .Values.storage "s3" }}
  {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}
  s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }}
  s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }}
  {{- end }}
  {{- else if eq .Values.storage "swift" }}
  {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }}
  swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }}
  swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }}
  {{- end }}
  {{- end }}

View File

@@ -0,0 +1,42 @@
# Service exposing the registry on .Values.service.port -> container port 5000.
apiVersion: v1
kind: Service
metadata:
  name: {{ template "docker-registry.fullname" . }}
  labels:
    app: {{ template "docker-registry.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- if .Values.service.annotations }}
  annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
  type: {{ .Values.service.type }}
{{- if (and (eq .Values.service.type "ClusterIP") (not (empty .Values.service.clusterIP))) }}
  clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges))) }}
  # toYaml is required here: interpolating the list directly renders a Go
  # slice like "[10.0.0.0/8 172.16.0.0/12]", which is not valid YAML.
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end }}
{{- if .Values.service.sessionAffinity }}
  sessionAffinity: {{ .Values.service.sessionAffinity }}
{{- if .Values.service.sessionAffinityConfig }}
  sessionAffinityConfig:
    {{ toYaml .Values.service.sessionAffinityConfig | nindent 4 }}
{{- end -}}
{{- end }}
  ports:
    - port: {{ .Values.service.port }}
      protocol: TCP
      name: {{ if .Values.tlsSecretName }}https{{ else }}http{{ end }}-{{ .Values.service.port }}
      targetPort: 5000
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
{{- end }}
  selector:
    app: {{ template "docker-registry.name" . }}
    release: {{ .Release.Name }}

149
testdata/docker-registry/values.yaml vendored Normal file
View File

@@ -0,0 +1,149 @@
# Default values for docker-registry.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2

updateStrategy: {}
  # type: RollingUpdate
  # rollingUpdate:
  #   maxSurge: 1
  #   maxUnavailable: 0

podAnnotations: {}
podLabels: {}

image:
  repository: registry
  tag: 2.7.1
  pullPolicy: IfNotPresent
# imagePullSecrets:
#   - name: docker

service:
  name: registry
  type: ClusterIP
  # sessionAffinity: None
  # sessionAffinityConfig: {}
  # clusterIP:
  port: 5000
  # nodePort:
  # loadBalancerIP:
  # loadBalancerSourceRanges:
  annotations: {}
  # foo.io/bar: "true"

ingress:
  enabled: false
  path: /
  # Used to create an Ingress record.
  hosts:
    - chart-example.local
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  labels: {}
  tls:
    # Secrets must be manually created in the namespace.
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

persistence:
  accessMode: 'ReadWriteOnce'
  enabled: false
  size: 10Gi
  # storageClass: '-'

# set the type of filesystem to use: filesystem, s3
storage: filesystem

# Set this to name of secret for tls certs
# tlsSecretName: registry.docker.example.com
secrets:
  haSharedSecret: ""
  htpasswd: ""
  # Secrets for Azure
  # azure:
  #   accountName: ""
  #   accountKey: ""
  #   container: ""
  # Secrets for S3 access and secret keys
  # s3:
  #   accessKey: ""
  #   secretKey: ""
  # Secrets for Swift username and password
  # swift:
  #   username: ""
  #   password: ""

# Options for s3 storage type:
# s3:
#   region: us-east-1
#   regionEndpoint: s3.us-east-1.amazonaws.com
#   bucket: my-bucket
#   encrypt: false
#   secure: true

# Options for swift storage type:
# swift:
#   authurl: http://swift.example.com/
#   container: my-container

# Rendered verbatim into the registry's config.yml via the ConfigMap.
configData:
  version: 0.1
  log:
    fields:
      service: registry
  storage:
    cache:
      blobdescriptor: inmemory
  http:
    addr: :5000
    headers:
      X-Content-Type-Options: [nosniff]
  health:
    storagedriver:
      enabled: true
      interval: 10s
      threshold: 3

securityContext:
  enabled: true
  runAsUser: 1000
  fsGroup: 1000

priorityClassName: ""

podDisruptionBudget: {}
  # maxUnavailable: 1
  # minAvailable: 2

nodeSelector: {}

affinity: {}

tolerations: []

extraVolumeMounts: []
## Additional volumeMounts to the registry container.
#  - mountPath: /secret-data
#    name: cloudfront-pem-secret
#    readOnly: true

extraVolumes: []
## Additional volumes to the pod.
#  - name: cloudfront-pem-secret
#    secret:
#      secretName: cloudfront-credentials
#      items:
#        - key: cloudfront.pem
#          path: cloudfront.pem
#          mode: 511

109
testdata/rawmanifests/podinfo.yaml vendored Normal file
View File

@@ -0,0 +1,109 @@
---
# Source: podinfo/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: tester-podinfo
  labels:
    helm.sh/chart: podinfo-5.2.1
    app.kubernetes.io/name: tester-podinfo
    app.kubernetes.io/version: "5.2.1"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 9898
      targetPort: http
      protocol: TCP
      name: http
    - port: 9999
      targetPort: grpc
      protocol: TCP
      name: grpc
  selector:
    app.kubernetes.io/name: tester-podinfo
---
# Source: podinfo/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tester-podinfo
  labels:
    helm.sh/chart: podinfo-5.2.1
    app.kubernetes.io/name: tester-podinfo
    app.kubernetes.io/version: "5.2.1"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tester-podinfo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tester-podinfo
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9898"
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: podinfo
          image: "ghcr.io/stefanprodan/podinfo:5.2.1"
          imagePullPolicy: IfNotPresent
          command:
            - ./podinfo
            - --port=9898
            - --cert-path=/data/cert
            - --port-metrics=9797
            - --grpc-port=9999
            - --grpc-service-name=podinfo
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: "#34577c"
          ports:
            - name: http
              containerPort: 9898
              protocol: TCP
            - name: http-metrics
              containerPort: 9797
              protocol: TCP
            - name: grpc
              containerPort: 9999
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            initialDelaySeconds: 1
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            initialDelaySeconds: 1
            timeoutSeconds: 5
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            limits: null
            requests:
              cpu: 1m
              memory: 16Mi
      volumes:
        - name: data
          emptyDir: {}

View File

@@ -15,10 +15,14 @@ then
echo "[FATAL] disable firewalld first"
fi
SELINUXSTATUS=$(getenforce)
if [ "$SELINUXSTATUS" == "Permissive" ] || [ "$SELINUXSTATUS" == "Enforcing" ] ; then
echo "[FATAL] disable selinux"
exit 1
mkdir -p /etc/rancher/rke2/
# TODO - allow using selinux
SELINUXSTATUS="$(getenforce)"
if [ "$SELINUXSTATUS" = "Permissive" ] || [ "$SELINUXSTATUS" = "Enforcing" ]
then
echo "selinux: true" | sudo tee -a /etc/rancher/rke2/config.yaml > /dev/null
else
echo "SELINUX disabled. continuing"
fi
@@ -30,26 +34,32 @@ mkdir -p ${LOCAL_IMAGES_FILEPATH}
cp ${ARTIFACT_DIR}/images/* ${LOCAL_IMAGES_FILEPATH}
# TODO - add ability to use local binary with yum install
# ----------------------------------------------------------
# uncomment to use a specific local binary for the install
# ----------------------------------------------------------
# LOCAL_RKE2_BIN='rke2-beta13-dev'
if [ -n "${LOCAL_RKE2_BIN}" ] && [ -f "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" ] ; then
echo "Use "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" for rke2 binary"
#if [ -n "${LOCAL_RKE2_BIN}" ] && [ -f "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" ] ; then
# echo "Use "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" for rke2 binary"
#
# INSTALL_RKE2_SKIP_START=true \
# RKE2_RUNTIME_IMAGE="rancher/rke2-runtime:${RKE2_VERSION_DOCKER}" \
# ${ARTIFACT_DIR}/bin/rke2-installer.run
#
# rm -f /usr/local/bin/rke2
#
# cp "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" /usr/local/bin/rke2
#
# systemctl start rke2
#else
# ${ARTIFACT_DIR}/bin/rke2-installer.run
#fi
INSTALL_RKE2_SKIP_START=true \
RKE2_RUNTIME_IMAGE="rancher/rke2-runtime:${RKE2_VERSION_DOCKER}" \
${ARTIFACT_DIR}/bin/rke2-installer.run
yum install -y ${ARTIFACT_DIR}/rpm/*
rm -f /usr/local/bin/rke2
cp "${ARTIFACT_DIR}/bin/${LOCAL_RKE2_BIN}" /usr/local/bin/rke2
systemctl start rke2
else
${ARTIFACT_DIR}/bin/rke2-installer.run
fi
systemctl enable rke2-server && systemctl start rke2-server
while [ -f "/etc/rancher/rke2/rke2.yaml" ] ; do
echo "Waiting for /etc/rancher/rke2/rke2.yaml to exist..."

View File

@@ -1,169 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: registry
---
apiVersion: v1
kind: ConfigMap
metadata:
name: docker-registry
namespace: registry
data:
registry-config.yml: |
version: 0.1
log:
fields:
service: registry
storage:
cache:
blobdescriptor: inmemory
filesystem:
rootdirectory: /var/lib/registry
http:
addr: :5000
headers:
X-Content-Type-Options: [nosniff]
# auth:
# htpasswd:
# realm: basic-realm
# path: /auth/htpasswd
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
cattle.io/creator: norman
workload.user.cattle.io/workloadselector: deployment-registry-registry
name: registry
namespace: registry
spec:
replicas: 1
selector:
matchLabels:
workload.user.cattle.io/workloadselector: deployment-registry-registry
template:
metadata:
labels:
workload.user.cattle.io/workloadselector: deployment-registry-registry
spec:
containers:
- image: registry:2
imagePullPolicy: Always
name: registry
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities: {}
privileged: false
readOnlyRootFilesystem: false
runAsNonRoot: false
stdin: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
tty: true
volumeMounts:
- mountPath: /var/lib/registry
name: registryvol
- name: config
mountPath: /etc/docker/registry
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- name: registryvol
persistentVolumeClaim:
claimName: registryvol
- name: config
configMap:
name: docker-registry
items:
- key: registry-config.yml
path: config.yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
cattle.io/creator: norman
name: registryvol
namespace: registry
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
cattle.io/creator: norman
name: registrysvc
namespace: registry
spec:
ports:
- name: httpregistry
port: 5000
protocol: TCP
targetPort: 5000
selector:
workload.user.cattle.io/workloadselector: deployment-registry-registry
sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
cattle.io/creator: norman
foo: bar
name: registrynodeport
namespace: registry
spec:
ports:
- name: http
nodePort: 30500
port: 5000
protocol: TCP
targetPort: 5000
selector:
workload.user.cattle.io/workloadselector: deployment-registry-registry
sessionAffinity: None
type: NodePort
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
labels:
cattle.io/creator: norman
name: registryingress
namespace: registry
spec:
rules:
- host: registry
http:
paths:
- backend:
serviceName: registrysvc
servicePort: 5000
pathType: ImplementationSpecific