Compare commits

..

313 Commits

Author SHA1 Message Date
Adam Martin
c592551a37 fix image ref keys getting squashed when containing sigs/atts (#291)
* fix image ref keys getting squashed when containing sigs/atts

Signed-off-by: Adam Martin <adam.martin@ranchergovernment.com>

---------

Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
Signed-off-by: Adam Martin <adam.martin@ranchergovernment.com>
Co-authored-by: Adam Martin <adam.martin@rancherfederal.com>
2024-08-13 12:18:10 -07:00
Jacob Blain Christen
ef3eb05fce fix missing version info in release build (#283)
Signed-off-by: Jacob Blain Christen <dweomer5@gmail.com>
2024-08-05 10:11:40 -07:00
dependabot[bot]
3f64914097 bump github.com/docker/docker in the go_modules group across 1 directory (#281)
Bumps the go_modules group with 1 update in the / directory: [github.com/docker/docker](https://github.com/docker/docker).

Updates `github.com/docker/docker` from 25.0.5+incompatible to 25.0.6+incompatible
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v25.0.5...v25.0.6)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: indirect
  dependency-group: go_modules
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Zack Brady <zackbrady123@gmail.com>
2024-08-01 17:51:34 -04:00
Zack Brady
6a74668e2c updated install script (install.sh) (#280)
* removed sudo requirement from install script
* updated install script to specify install directory
* cleaned up install script
* a bit more changes and updates to the install script
* updated install script variable syntax
* added missing logic to install script
2024-08-01 17:48:26 -04:00
Kamin Fay
0c5cf20e87 fix digest images being lost on load of hauls (Signed). (#259)
* Adding oci tests
* Fixed the oci pusher to split on the correct '@' symbol
* Reverted Pusher changes and adjusted nameMap references in Index

---------

Co-authored-by: Jacob Blain Christen <dweomer5@gmail.com>
2024-07-30 23:44:47 -04:00
will
513719bc9e feat: add readonly flag (#277)
* updated flag from writeable to readonly (default=true)

---------

Signed-off-by: Jacob Blain Christen <dweomer5@gmail.com>
Co-authored-by: Zack Brady <zackbrady123@gmail.com>
Co-authored-by: Jacob Blain Christen <dweomer5@gmail.com>
2024-07-30 11:26:39 -07:00
Zack Brady
047b7a7003 fixed makefile for goreleaser v2 changes (#278) 2024-07-30 13:18:17 -04:00
Zack Brady
a4685169c6 updated goreleaser versioning defaults (#279) 2024-07-30 13:17:40 -04:00
Jacob Blain Christen
47549615c4 update feature_request.md (#274)
- `[RFE]` ➡️ `[feature]`

Signed-off-by: Jacob Blain Christen <dweomer5@gmail.com>
2024-07-26 14:48:08 -04:00
Zack Brady
2d725026dc merge pull request #271 from zackbradys/removed-some-references
removed some old references
2024-07-23 13:56:59 -04:00
Zack Brady
60667b7116 updated old references 2024-07-22 18:50:11 -04:00
Zack Brady
7d62a1c98e updated actions workflow user 2024-07-22 18:46:53 -04:00
Zack Brady
894ffb1533 merge pull request #269 from zackbradys/add-dockerhub-support
added dockerhub to github actions workflow
2024-07-20 01:18:08 -04:00
Zack Brady
78b3442d23 added dockerhub to github actions workflow 2024-07-20 01:12:59 -04:00
Zack Brady
cd46febb6b merge pull request #268 from zackbradys/remove-helm-chart
removed helm chart
2024-07-20 01:10:37 -04:00
Zack Brady
0957a930dd removed helm chart 2024-07-20 00:57:25 -04:00
Zack Brady
a6bc6308d9 merge pull request #267 from zackbradys/main
added debug container and workflow
2024-07-19 22:21:38 -04:00
Zack Brady
1304cf6c76 added debug container and workflow 2024-07-17 00:01:24 -04:00
Zack Brady
f2e02c80c0 merge pull request #262 from zackbradys/main
updated products flag description
2024-07-12 22:52:43 -04:00
Zack Brady
25806e993e updated products flag description 2024-07-12 01:25:05 -04:00
Zack Brady
05e67bc750 merge pull request #255 from zackbradys/main
updated chart for release
2024-06-25 23:27:24 -04:00
Zack Brady
b43ed0503a updated chart for release 2024-06-25 23:14:44 -04:00
Zack Brady
27e2fc9de0 merge pull request #254 from zackbradys/main
fixed workflow errors/warnings
2024-06-25 22:48:30 -04:00
Zack Brady
d32d75b93e fixed workflow errors/warnings 2024-06-25 22:43:31 -04:00
Zack Brady
ceb77601d0 merge pull request #252 from zackbradys/main
overhauling github actions and workflows
2024-06-24 22:52:08 -04:00
Zack Brady
d90545a9e4 fixed permissions on testdata 2024-06-17 22:23:03 -04:00
Zack Brady
bef141ab67 updated chart versions (will need to update again) 2024-06-17 19:45:18 -04:00
Zack Brady
385d767c2a last bit of fixes to workflow 2024-06-17 19:42:20 -04:00
Zack Brady
22edc77506 updated unit test workflow 2024-06-14 20:56:37 -04:00
Zack Brady
9058797bbc updated goreleaser deprecations 2024-06-14 20:43:32 -04:00
Zack Brady
35e2f655da added helm chart release job 2024-06-14 20:37:58 -04:00
Zack Brady
f5c0f6f0ae updated github template names 2024-06-14 20:04:01 -04:00
Zack Brady
0ec77b4168 merge pull request #248 from zackbradys/main
formatted all code with `go fmt`
2024-06-14 16:46:21 -04:00
Zack Brady
7a7906b8ea updated imports (and go fmt) 2024-06-13 23:44:06 -04:00
Zack Brady
f4774445f6 merge pull request #240 from pat-earl/doc_updates
added some documentation text to sync command
2024-06-10 18:13:09 -04:00
Zack Brady
d59b29bfce formatted gitignore to match dockerignore 2024-06-05 14:40:50 -04:00
Zack Brady
fd702202ac formatted all code (go fmt) 2024-06-05 08:25:45 -04:00
Adam Martin
9e9565717b Merge pull request #245 from ethanchowell/ehowell/helm-client
Configure chart commands to use helm clients for OCI and private regi…
2024-06-04 13:16:42 -04:00
Zack Brady
bfe47ae141 updated chart tests for new features 2024-06-03 23:31:50 -04:00
Zack Brady
ebab7f38a0 merge pull request #247 from kaminfay/dev/add-fileserver-timeout-flag
adding the timeout flag for fileserver command and setting default timeout to 60 seconds
2024-06-03 22:33:36 -04:00
Kamin Fay
f0cba3c2c6 Adding the timeout flag for fileserver command 2024-05-28 15:28:20 -04:00
Ethan Howell
286120da50 Configure chart commands to use helm clients for OCI and private registry support 2024-05-24 12:06:16 -04:00
Patrick Earl
dcdeb93518 Added some documentation text to sync command 2024-05-02 14:17:38 -04:00
Adam Martin
f7c24f6129 Merge pull request #235 from rancherfederal/dependabot/go_modules/golang.org/x/net-0.23.0
Bump golang.org/x/net from 0.17.0 to 0.23.0
2024-04-23 16:40:45 -04:00
Adam Martin
fe88d7033c Merge pull request #234 from amartin120/dup-digest-bugfix
fix for dup digest smashing in cosign
2024-04-23 15:49:54 -04:00
dependabot[bot]
ef31984c97 Bump golang.org/x/net from 0.17.0 to 0.23.0
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.17.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.17.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-23 19:46:05 +00:00
Adam Martin
2889f30275 fix for dup digest smashing in cosign
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-04-23 14:06:40 -04:00
Zack Brady
0674e0ab30 merge pull request #229 from zackbradys/vagrant
removed vagrant scripts
2024-04-15 09:36:24 -04:00
Zack Brady
d645c52135 removed vagrant scripts 2024-04-14 08:45:51 -04:00
Zack Brady
44baab3213 merge pull request #227 from zackbradys/helmifying
helmifying hauler
2024-04-11 22:02:37 -04:00
Zack Brady
1a317b0172 merge pull request #226 from zackbradys/testdata
updated hauler testdata
2024-04-11 21:57:37 -04:00
Zack Brady
128cb3b252 last bit of updates and formatting of chart 2024-04-07 00:23:49 -04:00
Zack Brady
91ff998634 updated hauler testdata 2024-04-06 15:19:53 -04:00
Zack Brady
8ac1ecaf29 adding functionality and cleaning up 2024-04-06 02:06:33 -04:00
Zack Brady
7447aad20a added initial helm chart 2024-04-06 00:28:59 -04:00
Zack Brady
003456d8ab merge pull request #225 from zackbradys/main
removed tag in release workflow
2024-04-05 21:36:26 -04:00
Zack Brady
f44b8b93af removed tag in release workflow 2024-04-05 21:32:58 -04:00
Zack Brady
e405840642 merge pull request #224 from zackbradys/main
fixed image ref in release workflow
2024-04-05 20:45:53 -04:00
Zack Brady
8c9aa909b0 updated/fixed image ref in release workflow 2024-04-05 20:43:36 -04:00
Zack Brady
8670489520 merge pull request #223 from zackbradys/main
fixed platforms in release workflow
2024-04-05 20:04:57 -04:00
Zack Hodgson Brady
f20d4052a4 updated/fixed platforms in release workflow 2024-04-05 20:02:00 -04:00
Zack Brady
c84bca43d2 updated/cleaned github actions (#222)
* cleaned up unit test workflow
* updated/cleaned up release workflow
* fixed workflow typo
2024-04-05 16:12:49 -07:00
Claus Albøge
6863d91f69 Make Product Registry configurable (#194)
Co-authored-by: Claus Albøge <ca@netic.dk>
2024-04-05 14:40:26 -07:00
Zack Brady
16eea6ac2a updated fileserver directory name (#219) 2024-04-05 14:36:39 -07:00
Adam Martin
f6f227567c Merge pull request #221 from amartin120/fix-file-logging
fix logging for files
2024-04-01 12:45:58 -04:00
Adam Martin
eb810c16f5 fix logging for files
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-04-01 12:23:05 -04:00
Adam Martin
b18f55ea60 Merge pull request #220 from amartin120/temp-override
temp dir override for `hauler store load`
2024-04-01 10:40:31 -04:00
Adam Martin
4bbe622073 add extra info for the tempdir override flag
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-04-01 09:39:34 -04:00
Adam Martin
ea5bcb36ae tempdir override flag for load
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-04-01 09:34:26 -04:00
Adam Martin
5c7daddfef Merge pull request #218 from amartin120/remove-cache-flag
deprecate misleading cache flag from store
2024-03-29 13:48:51 -04:00
Adam Martin
7083f3a4f3 deprecate the cache flag instead of remove
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-29 13:40:24 -04:00
Adam Martin
8541d73a0d Merge pull request #217 from amartin120/logging-updates
better logging when adding to store
2024-03-29 11:36:45 -04:00
Adam Martin
49d705d14c Merge pull request #216 from amartin120/cosign-update
update to v2.2.3 of RGS cosign fork
2024-03-29 11:36:29 -04:00
Clayton Castro
722851d809 Merge pull request #215 from clanktron/container
add container definition
2024-03-28 16:20:35 -07:00
clayton
82aedc867a switch to using bci-golang as builder image 2024-03-28 13:53:22 -07:00
clayton
e8fb37c6ed fix: ensure /tmp for hauler store load 2024-03-28 13:49:01 -07:00
Adam Martin
545b3f8acd added the copy back for now
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-28 16:16:05 -04:00
Adam Martin
3ae92fe20a remove copy at the image sync not needed with cosign update
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-28 15:00:00 -04:00
Adam Martin
35538bf45a removed misleading cache flag
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-28 14:24:37 -04:00
Adam Martin
b6701bbfbc better logging when adding to store
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-28 14:16:22 -04:00
Adam Martin
14738c3cd6 update to v2.2.3 of our cosign fork
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-03-28 13:11:26 -04:00
clayton
0657fd80fe add: dockerignore 2024-03-27 02:25:12 -07:00
clayton
d132e8b8e0 add: Dockerfile 2024-03-27 02:25:06 -07:00
Adam Martin
29367c152e Merge pull request #213 from rancherfederal/dependabot/go_modules/google.golang.org/protobuf-1.33.0
Bump google.golang.org/protobuf from 1.31.0 to 1.33.0
2024-03-26 14:58:08 -04:00
Adam Martin
185ae6bd74 Merge pull request #212 from rancherfederal/dependabot/go_modules/github.com/docker/docker-25.0.5incompatible
Bump github.com/docker/docker from 25.0.1+incompatible to 25.0.5+incompatible
2024-03-26 14:57:57 -04:00
dependabot[bot]
b6c78d3925 Bump google.golang.org/protobuf from 1.31.0 to 1.33.0
Bumps google.golang.org/protobuf from 1.31.0 to 1.33.0.

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-26 13:04:41 +00:00
dependabot[bot]
e718d40744 Bump github.com/docker/docker
Bumps [github.com/docker/docker](https://github.com/docker/docker) from 25.0.1+incompatible to 25.0.5+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v25.0.1...v25.0.5)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-26 13:03:05 +00:00
Zack Brady
1505bfb3af merge pull request #207 from zackbradys/main
updated and added new logos
2024-03-20 08:17:00 -04:00
Zack Hodgson Brady
e27b5b3cd1 updated and added new logos 2024-03-19 18:22:21 -04:00
Zack Brady
0472c8fc65 merge pull request #203 from zackbradys/main
updated github files
2024-03-11 09:28:08 -04:00
Zack Hodgson Brady
70a48f2efe updated github files 2024-03-10 16:45:41 -04:00
Adam Martin
bb2a8bfbec Merge pull request #197 from fgiudici/file_name_option
Fix --name option in "store add file" command
2024-02-27 08:33:09 -05:00
Adam Martin
2779c649c2 Merge pull request #195 from rancherfederal/dependabot/go_modules/helm.sh/helm/v3-3.14.2
Bump helm.sh/helm/v3 from 3.14.1 to 3.14.2
2024-02-27 08:00:57 -05:00
Francesco Giudici
8120537af2 Fix --name option in "store add file" command
Fixes: #196

Signed-off-by: Francesco Giudici <francesco.giudici@suse.com>
2024-02-27 09:54:48 +01:00
dependabot[bot]
9cdab516f0 Bump helm.sh/helm/v3 from 3.14.1 to 3.14.2
Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.14.1 to 3.14.2.
- [Release notes](https://github.com/helm/helm/releases)
- [Commits](https://github.com/helm/helm/compare/v3.14.1...v3.14.2)

---
updated-dependencies:
- dependency-name: helm.sh/helm/v3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-27 00:21:10 +00:00
Adam Martin
d136d1bfd2 Merge pull request #191 from atanasdinov/cosign-error-code
Exit with status code 1 if cosign is not configured
2024-02-23 08:49:25 -05:00
Atanas Dinov
003560c3b3 Exit with status code 1 if cosign is not configured 2024-02-23 10:39:47 +02:00
Adam Martin
1b9d057f7a Merge pull request #188 from amartin120/registry-flag
add registry flag to sync cli to match annotation functionality
2024-02-22 08:57:44 -05:00
Adam Martin
2764e2d3ea Merge pull request #187 from amartin120/fix-exitcode
fix exit code on error
2024-02-22 08:57:31 -05:00
Zack Brady
360049fe19 reverting changes for logos (#189) 2024-02-21 14:35:07 -07:00
Brandon
79b240d17f Merge pull request #186 from rancherfederal/bdev
adding graphics to README.md
2024-02-20 06:50:36 -05:00
bgulla
214704bcfb adding graphics 2024-02-20 06:46:51 -05:00
Adam Martin
ef73fff01a fix exit code on error
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-19 16:45:04 -05:00
Adam Martin
0c6fdc86da add registry flag to cli for sync 2024-02-19 10:10:47 -05:00
Zack Brady
7fb537a31a merge pull request #184 from zackbradys/main
updated `readme` and `install.sh` for hauler `v1.0.0`
2024-02-17 22:12:44 -05:00
Zack Brady
6ca7fb6255 updated readme and removed roadmap 2024-02-17 15:00:59 -05:00
Zack Brady
d70a867283 updated/cleaned up install.sh 2024-02-17 10:53:10 -05:00
Adam Martin
46ea8b5df9 Merge pull request #180 from amartin120/remove-deprecated-cmds
remove deprecated commands
2024-02-17 09:09:05 -05:00
Adam Martin
5592ec0f88 remove deprecated commands
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-16 09:10:29 -05:00
Adam Martin
e8254371c0 Merge pull request #177 from amartin120/add-login
add login command
2024-02-16 08:16:58 -05:00
Adam Martin
8d2a84d27c Merge pull request #179 from rancherfederal/dependabot/go_modules/helm.sh/helm/v3-3.14.1
Bump helm.sh/helm/v3 from 3.14.0 to 3.14.1
2024-02-16 08:14:00 -05:00
Adam Martin
72734ecc76 Merge pull request #178 from amartin120/bug-file-name
bug-fix: handle complex file names
2024-02-16 08:13:28 -05:00
dependabot[bot]
4759879a5d Bump helm.sh/helm/v3 from 3.14.0 to 3.14.1
Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.14.0 to 3.14.1.
- [Release notes](https://github.com/helm/helm/releases)
- [Commits](https://github.com/helm/helm/compare/v3.14.0...v3.14.1)

---
updated-dependencies:
- dependency-name: helm.sh/helm/v3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-16 04:48:57 +00:00
Adam Martin
dbcfe13fb6 bug-fix: handle complex file names
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-15 19:55:38 -05:00
Adam Martin
cd8d4f6e46 add login command
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-15 15:54:23 -05:00
Adam Martin
e15c8d54fa Merge pull request #176 from amartin120/info-totals
update to add size totals and cosign artifacts to the `info` command
2024-02-15 12:39:17 -05:00
Adam Martin
ccd529ab48 update to add size totals and cosign bits to the info command
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-14 13:03:36 -05:00
Adam Martin
3cf4afe6d1 Merge pull request #173 from amartin120/sync-annotations
image spec manifest annotations - key/platform/registry
2024-02-12 13:10:13 -05:00
Adam Martin
0c55d00d49 apply the registry override first in an image sync
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-11 10:58:31 -05:00
Adam Martin
6c2b97042e switch the 'not a multi-arch image' log message to be debug
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-11 10:37:40 -05:00
Adam Martin
be22e56f27 fix whitespace issue
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-10 23:32:42 -05:00
Adam Martin
c8ea279c0d add better logging for save
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-10 23:30:34 -05:00
Adam Martin
59ff02b52b add annotations for registry
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-10 22:38:11 -05:00
Adam Martin
8b3398018a add annotations for key and platform
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-02-10 21:07:29 -05:00
Adam Martin
ae80b482e4 Merge pull request #168 from amartin120/dep-updates
dependency bumps for security vuln fixes
2024-01-30 16:51:48 -05:00
Adam Martin
1ae496fb8b dep bumps for security vuln fixes
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-30 14:41:33 -05:00
Adam Martin
7919dccffc Merge pull request #167 from amartin120/prerelease-flag
release process checks tag to determine pre-release
2024-01-30 11:11:13 -05:00
Adam Martin
fc7a19c755 check tag to determine pre-release
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-30 10:57:40 -05:00
Adam Martin
ade0feccf0 Merge pull request #166 from clemenko/main
Update install.sh for file cleaning
2024-01-30 09:04:41 -05:00
Andy Clemenko
f78fdf5e3d Update install.sh
adding the old hauler binary to the cleanup
2024-01-30 08:55:57 -05:00
Andy Clemenko
85d6bc0233 Update install.sh for file cleaning
removing LICENSE and README.md files.
2024-01-30 08:41:07 -05:00
Adam Martin
d1499b7738 Merge pull request #164 from amartin120/cosign-updates
Add `--platform` flag to image processes and RGS flavored cosign setup improvement.
2024-01-29 14:46:18 -05:00
Adam Martin
27acb239e4 clean up makefile 2024-01-29 13:41:53 -05:00
Adam Martin
e8d084847d remove extra debug statement
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-28 21:15:27 -05:00
Adam Martin
e70379870f another fix for the unit test gh action
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-28 19:51:22 -05:00
Adam Martin
a05d21c052 add platform flag for image add and sync
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-28 19:48:16 -05:00
Adam Martin
8256aa55ce adjust unit test gh action for latest updates 2024-01-28 19:46:55 -05:00
Adam Martin
0e6c3690b1 bump cosign version to v2.2.2+carbide.2 2024-01-28 19:45:05 -05:00
Adam Martin
a977cec50c improve cosign setup
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2024-01-28 12:08:31 -05:00
Adam Martin
5edc96d152 Merge pull request #162 from zackbradys/main
updated archive default name
2024-01-24 09:19:48 -05:00
Zack Hodgson Brady
fbafa60da5 updated archive default name 2024-01-23 22:49:20 -05:00
Adam Martin
cc917af0f2 Merge pull request #159 from amartin120/store-fileserver
Store fileserver
2024-01-22 15:12:45 -05:00
Adam Martin
f76160d8be Merge pull request #160 from amartin120/add-license
add license file
2024-01-22 15:12:05 -05:00
Adam Martin
b24b25d557 add license file 2024-01-22 15:06:09 -05:00
Adam Martin
d9e298b725 adjust to make registry and fileserver subcommands 2024-01-22 13:40:58 -05:00
Adam Martin
e14453f730 add fileserver option for store serve 2024-01-22 11:31:46 -05:00
Zack Brady
990ade9cd0 merge pull request #152 from zackbradys/main
updated readme and hauler `install.sh`
2023-12-20 19:56:57 -05:00
Zack Hodgson Brady
aecd37d192 added homebrew install instructions 2023-12-20 19:46:55 -05:00
Zack Brady
02f4946ead Merge branch 'rancherfederal:main' into main 2023-12-20 00:31:44 -05:00
Zack Hodgson Brady
978dc659f8 updated hauler version and automated default version 2023-12-19 21:24:04 -05:00
Adam Martin
f982f51d57 Merge pull request #150 from amartin120/info-type-filter
add simple type filter to store info
2023-12-19 13:07:46 -05:00
Adam Martin
2174e96f0e add simple type filter to store info
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-19 09:59:06 -05:00
Adam Martin
8cfe4432fc Merge pull request #149 from amartin120/registry-serve-fix
fix for validating foreign blobs
2023-12-18 15:51:38 -05:00
Adam Martin
f129484224 Merge pull request #148 from amartin120/fix-chart-tags
fix for charts with a + in the version
2023-12-18 15:51:20 -05:00
Adam Martin
4dbff83459 fix for validating foreign blobs
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-18 15:27:32 -05:00
Adam Martin
e229c2a1da fix for chart tags with a +
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-15 16:17:34 -05:00
Zack Brady
2a93e74b62 merge pull request #147 from zackbradys/main
updated/fixed install.sh
2023-12-14 23:36:53 -05:00
Zack Hodgson Brady
4d5d9eda7b updated readme for hauler install 2023-12-14 23:05:01 -05:00
Zack Hodgson Brady
a7cbfcb042 updated/fixed hauler install.sh 2023-12-14 23:04:36 -05:00
Adam Martin
7751b12e5e Merge pull request #146 from amartin120/more-updates-0.4.1
Improved logging for store copy / Updated store info to handle multi-arch images
2023-12-14 15:05:24 -05:00
Adam Martin
6e3d3fc7b8 updated store info to handle multi arch images
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-14 11:15:37 -05:00
Adam Martin
0f7f363d6c improved logging for hauler store copy
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-11 18:15:34 -05:00
Adam Martin
ab975a1dc7 Merge pull request #144 from amartin120/add-autocompletion
add autocompletion
2023-12-05 12:19:01 -05:00
Adam Martin
2d92d41245 Merge pull request #142 from amartin120/performance-fix
performance fix / version display improvement
2023-12-05 12:18:34 -05:00
Adam Martin
e2176d211a keep consistent with other subcommands
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-05 11:29:01 -05:00
Adam Martin
93ae968580 add autocompletion
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-05 10:37:29 -05:00
Adam Martin
b0a37d21af performance fix for images
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-04 11:19:57 -05:00
Adam Martin
aa16575c6f cleaned up version command more
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-12-04 11:19:43 -05:00
Adam Martin
2959cfc346 Merge pull request #141 from amartin120/goreleaser-versioning-fix
fix hauler version display
2023-11-30 14:01:14 -05:00
Adam Martin
c04211a55e Merge pull request #140 from amartin120/retry-logic
Retry logic / Auth Flag Fix / Sync Cleanup
2023-11-30 14:00:31 -05:00
Adam Martin
c497f53972 fix hauler version display
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-30 13:39:23 -05:00
Adam Martin
f1fbd7e9c2 don't flush store on each sync
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-30 10:02:04 -05:00
Adam Martin
f348fb8d4d registry auth fix for copy
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-28 22:29:00 -05:00
Adam Martin
fe60b1fd1a add retry logic
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-28 10:02:21 -05:00
Zack Brady
756c0171c3 merge pull request #139 from zackbradys/main
added new installation method (`install.sh`)
2023-11-16 14:01:06 -05:00
Zack Hodgson Brady
c394965f88 more improvements to script 2023-11-12 17:18:41 -05:00
Zack Hodgson Brady
43e2dc56ec upgraded install script functionality 2023-11-12 03:50:32 -05:00
Zack Hodgson Brady
795a88218f updated readme for new install script 2023-11-12 02:48:28 -05:00
Zack Hodgson Brady
ec2ada9dcb cleaned up install script variables 2023-11-12 00:26:28 -05:00
Zack Hodgson Brady
45cea89752 added initial install script 2023-11-12 00:06:49 -05:00
Adam Martin
6062c20e02 Merge pull request #138 from rancherfederal/fix-github-path
fix carbide cosign repo path and perms
2023-11-06 09:08:41 -05:00
Adam Martin
be486df762 fix carbide cosign repo path and perms
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-06 09:07:13 -05:00
Adam Martin
4d950f7b0a Add OCI hauler manifests. (#136)
* pull carbide flavored hauler manifests from reg
* remove temp constant
* remove temp hardcoding
* add comments for new sync flags
* fixes for version and registry serve
* band-aid for store info... needs love
* add sbom to info logic
* adjust a few text descriptions
* adjust tag names with +
* removed testing file

Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-03 12:44:05 -07:00
Adam Martin
f8c16a1a24 Merge pull request #135 from rancherfederal/cosign-verify
Add cosign verify functionality.
2023-11-03 15:27:48 -04:00
Adam Martin
6e8c7db81f Merge branch 'main' of github.com:rancherfederal/hauler into cosign-verify 2023-11-03 13:56:21 -04:00
Adam Martin
4772657548 Add cosign for handling image functionality. (#134)
* pull back in ocil
* updates to OCIL funcs to handle cosign changes
* add cosign logic
* adjust Makefile to be a little more generic
* cli updates to accommodate the cosign additions
* add cosign drop-in funcs
* impl for cosign functions for images & store copy
* fixes and logging for cosign verify <image>
* fix cosign verify logging
* update go.mod

Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-11-03 10:43:32 -07:00
Zack Brady
337494cefd merge pull request #132 from rancherfederal/zackbradys-readme-updates
readme and docs updates
2023-10-26 00:43:53 -04:00
Zack Brady
865afb4a2d updated readme for extra info 2023-10-26 00:42:58 -04:00
Zack Brady
d8b0193a92 merge pull request #133 from rancherfederal/zackbradys-github-updates
updated github templates
2023-10-25 18:01:34 -04:00
Zack Brady
b616f54085 updated readme for deprecated commands
Co-authored-by: Jacob Blain Christen <dweomer5@gmail.com>
2023-10-25 17:03:35 -04:00
Zack Brady
870f2ebda8 last typo fixes 2023-10-21 02:37:42 -04:00
Zack Brady
b7a8fc0a60 fixed typos 2023-10-20 12:32:31 -04:00
Zack Brady
04c97b8a97 fixed typos 2023-10-20 12:22:10 -04:00
Zack Brady
d46ccd03a5 updated github templates 2023-10-20 04:59:51 -04:00
Zack Brady
99288f9b9d removed old docs 2023-10-20 03:56:01 -04:00
Zack Brady
2cc5e902ad updated readme 2023-10-20 03:49:43 -04:00
Adam Martin
f2b0c44af3 polish up cosign verify for hauler store sync
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-12 12:05:35 -04:00
Adam Martin
356c46fe28 update go.mod
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-12 10:34:40 -04:00
Adam Martin
323b93ae20 fix cosign verify logging
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-11 13:44:21 -04:00
Adam Martin
bb9a088a84 fixes and logging for cosign verify <image>
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-11 13:44:21 -04:00
Adam Martin
96d92e3248 impl for cosign functions for images & store copy
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-11 13:44:21 -04:00
Adam Martin
220eeedb2c add cosign drop-in funcs
Signed-off-by: Adam Martin <adam.martin@rancherfederal.com>
2023-10-11 13:44:21 -04:00
Adam Martin
3049846a46 cli updates to accommodate the cosign additions 2023-10-11 13:44:21 -04:00
Adam Martin
ece463bc1c adjust Makefile to be a little more generic 2023-10-11 13:44:21 -04:00
Adam Martin
58c55d7aeb add cosign logic 2023-10-11 13:44:21 -04:00
Adam Martin
214ed48829 updates to OCIL funcs to handle cosign changes 2023-10-11 13:43:19 -04:00
Adam Martin
7d6bbbc6fc pull back in ocil 2023-10-11 13:40:42 -04:00
Jacob Blain Christen
995477db22 Merge pull request #131 from rancherfederal/dep-updates
dependency updates
2023-10-11 09:36:25 -07:00
Adam Martin
9862e61f23 update github action deps as well 2023-10-06 15:06:27 -04:00
Adam Martin
fe7122da8a update dependencies 2023-10-06 14:53:17 -04:00
Jacob Blain Christen
2999b90e30 Merge pull request #130 from rancherfederal/deprecate-non-store-stuff
deprecation notices for `dl` and the non-store version of `serve`
2023-09-28 11:51:33 -07:00
Adam Martin
4beb4d4200 deprecation notices for dl and non-store serve 2023-09-27 09:07:33 -04:00
Brandon
4ed1b0a1a4 Update walkthrough.md 2022-08-27 10:40:15 -04:00
Brandon
925ce53aeb Merge pull request #127 from neoakris/content_doc_example
Adding example of imperative generation of declarative config file to doc
2022-04-25 15:52:36 -04:00
Chris McGrath
3888e23907 reworded code comment to be more accurate 2022-04-25 15:26:06 -04:00
Chris McGrath
88f482f4af fixed syntax issue 2022-04-25 15:22:27 -04:00
Chris McGrath
425c92e8a6 added missing 'cat contents.yaml' to example 2022-04-25 15:08:08 -04:00
Chris McGrath
011a4d8725 adding imperative generation of declarative config example to doc 2022-04-25 15:03:39 -04:00
Brandon
c60ccc8085 Merge pull request #116 from noslzzp/main
Update README.md
2022-02-03 18:48:54 -05:00
NoSLZZP
6ebcd5088d Update README.md 2022-02-03 17:23:41 -05:00
Josh Wolf
d8bbb16e6e Merge pull request #110 from joshrwolf/override-files
pre 0.3 general bug fixes
2022-01-25 11:05:56 -07:00
Josh Wolf
105fb3a119 ensure thick charts follow proper reference naming convention 2022-01-25 11:00:26 -07:00
Josh Wolf
c341929a57 add optional args to file name generation and discovery 2022-01-25 08:07:43 -07:00
Josh Wolf
dff591d08b ensure k3s collection contents have default repo specified (#109)
ensure k3s collection contents have default repo specified
2022-01-24 17:03:07 -07:00
Josh Wolf
50b5f87c86 Merge pull request #108 from joshrwolf/helm
update helm dependency to 3.8.0, add support for helm authentication when storing charts
2022-01-24 16:40:35 -07:00
Josh Wolf
320a4af36a add support for helm authentication when storing charts 2022-01-24 16:31:03 -07:00
Josh Wolf
a1be863812 update helm dependency to 3.8.0 2022-01-24 16:29:57 -07:00
Josh Wolf
513175399b add basic configuration for fileserver 2022-01-24 08:24:42 -07:00
Matt Nikkel
c3a0a09216 Merge pull request #92 from nikkelma/image-txt-collection
Add ImageTxt collection
2022-01-20 10:31:12 -05:00
Matt Nikkel
94268e38ba Fix panic on empty target sources map 2022-01-13 13:57:28 -05:00
Matt Nikkel
ac52ad8260 Add ImageTxt tests 2022-01-13 13:57:27 -05:00
Matt Nikkel
597a5aa06d Handle ImageTxts objects in sync subcommand 2022-01-13 13:56:20 -05:00
Matt Nikkel
6d9270106b Add ImageTxt collection + storing logic 2022-01-13 13:43:22 -05:00
Matt Nikkel
cee4bddbc0 Add ImageTxts collection API definition 2022-01-13 13:20:24 -05:00
Josh Wolf
917e686da6 Merge pull request #106 from joshrwolf/ocil
factor out core oci logic into independent library (rancherfederal/ocil)
2022-01-12 11:37:30 -07:00
Josh Wolf
39dc1aac23 ensure charts are always given a version tag 2022-01-12 11:32:26 -07:00
Josh Wolf
8edc4927a8 move store/cache flags from global to store scoped 2022-01-12 10:30:05 -07:00
Josh Wolf
8b372d8a20 factor out core oci logic into independent library (rancherfederal/ocil) 2022-01-12 09:47:09 -07:00
Josh Wolf
96d231efdf Merge pull request #102 from joshrwolf/content-location-tagging
standardize content naming for unnamed content
2021-12-13 15:32:40 -07:00
Josh Wolf
1030ed92a8 add some standardization to referencing unreferenced content 2021-12-13 13:23:08 -07:00
Josh Wolf
313c40bba8 standardize content naming for unnamed content 2021-12-13 12:00:41 -07:00
Josh Wolf
e6596549a3 Merge pull request #100 from joshrwolf/charts
add support for local charts from directory or archives
2021-12-13 11:57:53 -07:00
Josh Wolf
d31a17f411 ensure sync doesn't panic when given invalid or empty yaml content 2021-12-10 18:58:51 -07:00
Josh Wolf
d2d3183ef1 add support for local charts from directory or archives 2021-12-10 10:50:04 -07:00
Josh Wolf
e9bd38ca75 Merge pull request #98 from joshrwolf/oci
improve `store` implementation
2021-12-09 11:31:10 -07:00
Josh Wolf
697a9fe034 ensure each copy test is independent 2021-12-09 11:26:48 -07:00
Josh Wolf
98322f7b28 rename redundant Store.Store to Store.Content 2021-12-09 11:12:37 -07:00
Josh Wolf
7eabbdc0aa restructure cli copy messages to print descriptor information 2021-12-09 11:09:50 -07:00
Josh Wolf
cd93d7aaea make our implementation of oci content store public, remove redundant wrapper Store methods in favor of OCI implementation, add tests for store.Copy*() 2021-12-09 11:09:09 -07:00
Matt Nikkel
4d676c632f Add docs for public content fields 2021-12-08 14:52:09 -05:00
Josh Wolf
352c0141a9 Merge pull request #96 from nikkelma/public-content-types
Make content types pubic, expose configuration fields
2021-12-08 12:46:38 -07:00
Matt Nikkel
40fb078106 Add chart name, repo, version fields 2021-12-08 14:35:30 -05:00
Matt Nikkel
49f9e96576 Add image ref field 2021-12-08 14:35:14 -05:00
Matt Nikkel
fd22f93348 Make file ref field public 2021-12-08 14:34:54 -05:00
Matt Nikkel
822a24d79d Expose image OCI implementor publicly 2021-12-08 14:33:43 -05:00
Matt Nikkel
4e14688a9d Expose file OCI implementor publicly 2021-12-08 14:32:23 -05:00
Josh Wolf
61cbc6f614 Merge pull request #95 from joshrwolf/info
enhance `store info` command to actually show useful information
2021-12-08 11:25:13 -07:00
Josh Wolf
6c1640f694 ensure filetests share a setup/teardown 2021-12-08 11:21:36 -07:00
Josh Wolf
8e4d3bee01 refactor cli command to properly output with more informative info 2021-12-08 11:01:43 -07:00
Josh Wolf
1d7ea22bb0 ensure content type for files is properly detected by getter, add test verifying this 2021-12-08 11:01:08 -07:00
Josh Wolf
85ae4205cd remove store.List in favor of store.Walk, restructure store.Walk to walk index descriptors instead of manifests 2021-12-08 11:00:32 -07:00
Josh Wolf
e6e7ff6317 Merge pull request #87 from joshrwolf/oci-layout
refactor store/transport to use oci-layouts
2021-12-08 09:36:44 -07:00
Josh Wolf
395547ff90 better default support for registries requiring auth, and configurable for non-keychain uses 2021-12-08 09:33:21 -07:00
Josh Wolf
bb83d5ce5b allow file content to be passed a custom config 2021-12-08 09:25:45 -07:00
Josh Wolf
49f7b5ea0e add more public methods for building config files from any marshallable source 2021-12-08 09:25:27 -07:00
Josh Wolf
97341fd9b1 change default mappers behavior to failsafe (to filestore or nil) 2021-12-08 09:25:01 -07:00
Josh Wolf
a6831454e5 use internal oci store for store content backing 2021-12-08 09:24:16 -07:00
Josh Wolf
e812c2107c embrace the thick chart 2021-12-03 23:21:20 -07:00
Josh Wolf
a8e9d853db update dependencies to play nicely with controller-manager 2021-12-03 23:10:55 -07:00
Josh Wolf
9d5fae4c1d fix download/extract to use MapperStore 2021-12-03 20:19:55 -07:00
Josh Wolf
bdbac0a460 Merge branch 'main' into oci-layout 2021-12-03 14:20:03 -07:00
Josh Wolf
d55e7572e6 remove custom file store in favor of less hacky IoContentWriter extended on top of existing file store 2021-12-03 14:01:06 -07:00
Josh Wolf
c7ae551e6f move types to constants 2021-12-03 14:00:20 -07:00
Josh Wolf
f324078efc Merge pull request #85 from rancherfederal/fix-list-paging
Fix list request to registry to properly page
2021-12-02 09:48:05 -07:00
Josh Wolf
f0abcf162a move servers to internal, we're not blowing any minds here 2021-12-02 08:12:26 -07:00
Josh Wolf
8e692eecb4 add codecov 2021-12-01 23:01:14 -07:00
Josh Wolf
34836dacb0 add getter, store, and file tests 2021-12-01 22:49:16 -07:00
Josh Wolf
5855f79156 allow reference string to be passed to AddArtifact instead of name.ParseReference for ease of use, move reference validation within AddArtifact 2021-12-01 22:49:15 -07:00
Josh Wolf
d27ad7c7e8 add basic store tests 2021-12-01 22:49:15 -07:00
Josh Wolf
3c6ced89a9 Merge branch 'main' into oci-layout 2021-12-01 14:57:46 -07:00
Josh Wolf
d87d8a2041 primary: refactor store and transport to use oci-layouts and add fileserver feature
minors:
* add optional 'extraImages' to ThickCharts
* refactor File content into generic getter interfaces
* refactor artifact.Config into an actual usable interface (by File content)
* refactor 'copy' cli command to use oras mappers
* refactor 'serve' cli command to server registry and/or fileserver
2021-12-01 14:53:06 -07:00
Matt Nikkel
dc02554118 Fix list request to registry to properly page 2021-11-29 19:04:18 -05:00
Josh Wolf
de366c7b9b Merge pull request #74 from rancherfederal/cache-dir-fix
Update wording to conform to XDG cache dir spec
2021-11-19 12:36:58 -07:00
Matt Nikkel
07213d0da6 Update wording to conform to XDG cache dir spec 2021-11-17 12:31:06 -05:00
Josh Wolf
32d24b2b26 Merge pull request #73 from joshrwolf/logging
clean up and standardize logging usages
2021-11-16 12:04:21 -07:00
Josh Wolf
26759a14a2 clean up and standardize logging usages 2021-11-16 12:00:18 -07:00
Josh Wolf
641e76a314 ensure list doesn't prematurely exit on tagless images (#71)
* ensure list doesn't prematurely exit on tagless images
* update testdata examples
2021-11-12 15:26:50 -07:00
Josh Wolf
dfc1cae1c4 Merge pull request #69 from rancherfederal/68
fix bug packaging thick charts
2021-11-12 14:51:01 -07:00
Josh Wolf
707b30d30d fix bug packaging thick charts 2021-11-12 14:50:26 -07:00
Josh Wolf
fc6332d587 update readme, docs, roadmap, and several cli docs (#67)
* update readme, docs, roadmap, and several cli docs

* update dead links
2021-11-12 09:49:28 -07:00
Josh Wolf
49eb9e2527 Merge pull request #66 from rancherfederal/cli-version
add version command to cli
2021-11-11 13:53:58 -07:00
Josh Wolf
83d989ab85 add version command to cli 2021-11-11 13:52:15 -07:00
Josh Wolf
933af22907 mvp rc prep (#65)
* add bootstrap script to k3s collection
* update ci to 1.17.x and temporarily skip tests (:
* remove helmtemplater
* update dependencies
* update releaser
* rename extremely poorly worded gitignore
2021-11-11 12:11:29 -07:00
Josh Wolf
99a9a1c54f Merge pull request #63 from rancherfederal/collections
support `collections` (sets of primitive `contents`)
2021-11-10 20:28:02 -07:00
Josh Wolf
8146a88a5d move cache logic to a store configurable option 2021-11-10 20:26:12 -07:00
Josh Wolf
4ee6129154 add thick chart builtin collection (chart with required images) 2021-11-10 20:11:15 -07:00
Josh Wolf
20cd37e173 add collections type (group of contents), and initial k3s builtin 2021-11-10 19:36:50 -07:00
Josh Wolf
8ab9fd6a38 represent all content as oci layouts (artifact.OCI interface), add blob caching and ephemeral stores (#59)
* represent all content as artifact.OCI interface and manipulate/add all content using oci layouts
* initial brew taps and macos universal binary
* change mediaType to string for better compatibility with other libraries
* ensure config is minimally viable for file/charts
* add transparent layer caching (filesystem) to artifact operations, clean up layer interface used by file/chart
* add store list and store copy commands

Signed-off-by: Josh Wolf <josh@joshwolf.dev>
2021-11-10 10:37:21 -07:00
Josh Wolf
8a46c20db6 Merge pull request #55 from rancherfederal/cli-ux
* cli ux and verbiage cleanup
* add `hauler store add` command
2021-11-01 14:36:24 -07:00
Josh Wolf
cde59cea74 add 'store add' set of commands for content adding 2021-11-01 15:29:08 -06:00
Josh Wolf
786e63f2ef allow config file to be passed to hauler store serve 2021-11-01 14:06:22 -06:00
Josh Wolf
880b296759 Merge pull request #56 from rancherfederal/content-tests
add _basic_ unit tests to each content type
2021-11-01 12:04:03 -07:00
Josh Wolf
4835699746 add _basic_ unit tests to each content type 2021-11-01 13:00:51 -06:00
Josh Wolf
e5384251f2 add cli aliases 2021-11-01 11:22:26 -06:00
Josh Wolf
ffa6943d6d cli ux and verbiage cleanup 2021-11-01 11:10:32 -06:00
Josh Wolf
372af894b3 refactor to baseline on pluggable oci collection/distribution (#41)
refactor to baseline on pluggable oci collection/distribution

Co-authored-by: Josh Wolf <josh@joshwolf.dev>
2021-10-29 15:55:20 -06:00
Josh Wolf
cea46d28fa Merge pull request #31 from rancherfederal/issue-30
bug: fix error when running a package with 0 bundles
2021-06-24 08:31:51 -06:00
Josh Wolf
1ea08063ac Merge pull request #32 from rancherfederal/wips
update readme with more obvious wip
2021-06-24 08:31:23 -06:00
Josh Wolf
2e5a8f897e update readme with more obvious wip 2021-06-24 08:30:50 -06:00
Josh Wolf
39e37cc04a clean up unused move fns 2021-06-24 07:39:20 -06:00
Josh Wolf
25d1c5eda0 bug: fix error when running a package with 0 bundles 2021-06-22 10:10:29 -06:00
Josh Wolf
187f298bed Merge pull request #28 from rancherfederal/ci
add go mod tidy before vetting
2021-06-18 08:21:13 -06:00
Josh Wolf
57f2836a29 add go mod tidy before vetting 2021-06-18 08:20:53 -06:00
152 changed files with 8073 additions and 7426 deletions

8
.dockerignore Normal file
View File

@@ -0,0 +1,8 @@
*
!cmd
!go.mod
!go.sum
!internal
!Makefile
!pkg
!static

View File

@@ -1,31 +1,51 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
name: Bug Report
about: Submit a bug report to help us improve!
title: '[BUG]'
labels: 'bug'
assignees: ''
---
<!-- Thanks for helping us to improve Hauler! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
<!-- Thank you for helping us to improve Hauler! We welcome all bug reports. Please fill out each area of the template so we can better assist you. Comments like this will be hidden when you submit, but you can delete them if you wish. -->
**Environmental Info:**
**Environmental Info:**
<!-- Provide the output of "uname -a" -->
-
**Hauler Version:**
**System CPU architecture, OS, and Version:**
<!-- Provide the output from "uname -a" on the system where Hauler is installed -->
<!-- Provide the output of "hauler version" -->
**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->
-
**Steps To Reproduce:**
**Describe the Bug:**
**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->
<!-- Provide a clear and concise description of the bug -->
**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->
-
**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->
**Steps to Reproduce:**
<!-- Provide a clear and concise way to reproduce the bug -->
-
**Expected Behavior:**
<!-- Provide a clear and concise description of what you expected to happen -->
-
**Actual Behavior:**
<!-- Provide a clear and concise description of what actually happens -->
-
**Additional Context:**
<!-- Provide any other context and/or logs about the bug -->
-

View File

@@ -0,0 +1,33 @@
---
name: Feature Request
about: Submit a feature request for us to improve!
title: '[feature]'
labels: 'enhancement'
assignees: ''
---
<!-- Thank you for helping us to improve Hauler! We welcome all requests for enhancements (RFEs). Please fill out each area of the template so we can better assist you. Comments like this will be hidden when you submit, but you can delete them if you wish. -->
**Is this RFE related to an Existing Problem? If so, please describe:**
<!-- Provide a clear and concise description of the problem -->
-
**Describe Proposed Solution(s):**
<!-- Provide a clear and concise description of what you want to happen -->
-
**Describe Possible Alternatives:**
<!-- Provide a clear and concise description of any alternative solutions or features you've considered -->
-
**Additional Context:**
<!-- Provide any other context and/or logs about this feature request -->
-

View File

@@ -1,23 +1,36 @@
* **Please check if the PR fulfills these requirements**
- [ ] The commit message follows our guidelines
- [ ] Tests for the changes have been added (for bug fixes / features)
- [ ] Docs have been added / updated (for bug fixes / features)
**Please check below, if the PR fulfills these requirements:**
- [ ] Commit(s) and code follow the repository's guidelines.
- [ ] Test(s) have been added or updated to support these change(s).
- [ ] Doc(s) have been added or updated to support these change(s).
<!-- Comments like this will be hidden when you submit, but you can delete them if you wish. -->
* **What kind of change does this PR introduce?** (Bug fix, feature, docs update, ...)
**Associated Links:**
<!-- Provide any associated or linked issues related to these change(s) -->
-
* **What is the current behavior?** (You can also link to an open issue here)
**Types of Changes:**
<!-- What is the type of change? Bugfix, Feature, Breaking Change, etc... -->
-
* **What is the new behavior (if this is a feature change)?**
**Proposed Changes:**
<!-- Provide the high level and low level description of your change(s) so we can better understand these change(s) -->
-
* **Does this PR introduce a breaking change?** (What changes might users need to make in their application due to this PR?)
**Verification/Testing of Changes:**
<!-- How can the changes be verified? Provide the steps necessary to reproduce and verify the proposed change(s) -->
-
* **Other information**:
**Additional Context:**
<!-- Provide any additional information, such as whether this is a small, large, or complex change. Feel free to kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc... -->
-

View File

@@ -1,66 +0,0 @@
name: CI
on:
workflow_dispatch:
push:
tags:
- '*'
jobs:
test:
strategy:
matrix:
go-version: [1.16.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: go test ./...
- name: Run lint/vet
run: |
go get -u golang.org/x/lint/golint
golint ./...
go vet ./...
create-release:
needs: test
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Download release notes utility
env:
GH_REL_URL: https://github.com/buchanae/github-release-notes/releases/download/0.2.0/github-release-notes-linux-amd64-0.2.0.tar.gz
run: cd /tmp && curl -sSL ${GH_REL_URL} | tar xz && sudo mv github-release-notes /usr/local/bin/
- name: Generate release notes
run: |
echo 'CHANGELOG' > /tmp/release.txt
#github-release-notes -org rancherfederal -repo hauler -since-latest-release -include-author >> /tmp/release.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Build and run Hauler package build
run: |
mkdir bin
go build -o bin ./cmd/...
./bin/hauler package build
- name: Run GoReleaser
id: goreleaser
uses: goreleaser/goreleaser-action@v1
with:
version: latest
args: release --release-notes=/tmp/release.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

93
.github/workflows/release.yaml vendored Normal file
View File

@@ -0,0 +1,93 @@
name: Release Workflow
on:
workflow_dispatch:
push:
tags:
- '*'
jobs:
go-release:
name: Go Release Job
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: Set Up Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
- name: Run Go Releaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: "~> v2"
args: "release --clean -p 1"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
container-release:
name: Container Release Job
runs-on: ubuntu-latest
needs: [go-release]
timeout-minutes: 30
continue-on-error: true
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: Set Up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Authenticate to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Authenticate to DockerHub Container Registry
uses: docker/login-action@v3
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and Push Release Container to GitHub Container Registry
uses: docker/build-push-action@v5
with:
context: .
target: release
platforms: linux/amd64,linux/arm64
push: true
tags: ghcr.io/${{ github.repository }}:${{ github.ref_name }}, docker.io/hauler/hauler:${{ github.ref_name }}
- name: Build and Push Debug Container to GitHub Container Registry
uses: docker/build-push-action@v5
with:
context: .
target: debug
platforms: linux/amd64,linux/arm64
push: true
tags: ghcr.io/${{ github.repository }}-debug:${{ github.ref_name }}, docker.io/hauler/hauler-debug:${{ github.ref_name }}

43
.github/workflows/unittest.yaml vendored Normal file
View File

@@ -0,0 +1,43 @@
name: Unit Test Workflow
on:
workflow_dispatch:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
unit-test:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: Set Up Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
- name: Run Unit Tests
run: |
mkdir -p cmd/hauler/binaries
touch cmd/hauler/binaries/dummy.txt
go test -race -covermode=atomic -coverprofile=coverage.out ./...
- name: Upload Coverage Report
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: coverage.out

20
.gitignore vendored
View File

@@ -1,8 +1,5 @@
# Vagrant
.vagrant
# Editor directories and files
.DS_Store
**/.DS_Store
.idea
.vscode
*.suo
@@ -10,18 +7,13 @@
*.njsproj
*.sln
*.sw?
# old, ad-hoc ignores
*.dir-locals.el
artifacts
local-artifacts
airgap-scp.sh
# test artifacts
*.tar*
# generated
dist/
./bundle/
tmp/
bin/
pkg.yaml
/store/
/registry/
cmd/hauler/binaries

View File

@@ -1,16 +1,52 @@
version: 2
project_name: hauler
before:
hooks:
- go mod tidy
- go mod download
- rm -rf cmd/hauler/binaries
release:
prerelease: auto
make_latest: false
env:
- vpkg=github.com/rancherfederal/hauler/internal/version
- cosign_version=v2.2.3+carbide.2
builds:
- main: cmd/hauler/main.go
goos:
- linux
- darwin
- windows
goarch:
- amd64
- arm64
ldflags:
- -s -w -X {{ .Env.vpkg }}.gitVersion={{ .Version }} -X {{ .Env.vpkg }}.gitCommit={{ .ShortCommit }} -X {{ .Env.vpkg }}.gitTreeState={{if .IsGitDirty}}dirty{{else}}clean{{end}} -X {{ .Env.vpkg }}.buildDate={{ .Date }}
hooks:
pre:
- mkdir -p cmd/hauler/binaries
- wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/{{ .Env.cosign_version }}/cosign-{{ .Os }}-{{ .Arch }}{{ if eq .Os "windows" }}.exe{{ end }}
post:
- rm -rf cmd/hauler/binaries
env:
- CGO_ENABLED=0
flags:
- -tags=containers_image_openpgp containers_image_ostree
release:
extra_files:
- glob: ./pkg.tar.zst
universal_binaries:
- replace: false
changelog:
disable: false
use: git
brews:
- name: hauler
repository:
owner: hauler-dev
name: homebrew-tap
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
directory: Formula
description: "Hauler CLI"

45
Dockerfile Normal file
View File

@@ -0,0 +1,45 @@
# builder stage
FROM registry.suse.com/bci/golang:1.21 AS builder
RUN zypper --non-interactive install make bash wget ca-certificates
COPY . /build
WORKDIR /build
RUN make build
RUN echo "hauler:x:1001:1001::/home/hauler:" > /etc/passwd \
&& echo "hauler:x:1001:hauler" > /etc/group \
&& mkdir /home/hauler \
&& mkdir /store \
&& mkdir /fileserver \
&& mkdir /registry
# release stage
FROM scratch AS release
COPY --from=builder /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /etc/passwd /etc/passwd
COPY --from=builder /etc/group /etc/group
COPY --from=builder --chown=hauler:hauler /home/hauler/. /home/hauler
COPY --from=builder --chown=hauler:hauler /tmp/. /tmp
COPY --from=builder --chown=hauler:hauler /store/. /store
COPY --from=builder --chown=hauler:hauler /registry/. /registry
COPY --from=builder --chown=hauler:hauler /fileserver/. /fileserver
COPY --from=builder --chown=hauler:hauler /build/bin/hauler /
USER hauler
ENTRYPOINT [ "/hauler" ]
# debug stage
FROM alpine AS debug
COPY --from=builder /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /etc/passwd /etc/passwd
COPY --from=builder /etc/group /etc/group
COPY --from=builder --chown=hauler:hauler /home/hauler/. /home/hauler
COPY --from=builder --chown=hauler:hauler /build/bin/hauler /bin/hauler
RUN apk --no-cache add curl
USER hauler
WORKDIR /home/hauler

View File

@@ -174,29 +174,4 @@
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
END OF TERMS AND CONDITIONS

View File

@@ -1,20 +1,27 @@
SHELL:=/bin/bash
GO_BUILD_ENV=GOOS=linux GOARCH=amd64
GO_FILES=$(shell go list ./... | grep -v /vendor/)
BUILD_VERSION=$(shell cat VERSION)
BUILD_TAG=$(BUILD_VERSION)
COSIGN_VERSION=v2.2.3+carbide.2
.SILENT:
all: fmt vet install test
build:
rm -rf cmd/hauler/binaries;\
mkdir -p cmd/hauler/binaries;\
wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/$(COSIGN_VERSION)/cosign-$(shell go env GOOS)-$(shell go env GOARCH);\
mkdir bin;\
$(GO_BUILD_ENV) go build -o bin ./cmd/...;\
CGO_ENABLED=0 go build -o bin ./cmd/...;\
build-all: fmt vet
goreleaser build --clean --snapshot
install:
$(GO_BUILD_ENV) go install
rm -rf cmd/hauler/binaries;\
mkdir -p cmd/hauler/binaries;\
wget -P cmd/hauler/binaries/ https://github.com/hauler-dev/cosign/releases/download/$(COSIGN_VERSION)/cosign-$(shell go env GOOS)-$(shell go env GOARCH);\
CGO_ENABLED=0 go install ./cmd/...;\
vet:
go vet $(GO_FILES)

View File

@@ -1,70 +1,44 @@
# Hauler - Kubernetes Air Gap Migration
# Rancher Government Hauler
## WARNING - Work In Progress
![rancher-government-hauler-logo](/static/rgs-hauler-logo.png)
Hauler is a tool designed to ease the burden of working with containers and kubernetes in an airgap. Several components of hauler are used in unison to provide airgap utilities.
## Airgap Swiss Army Knife
Hauler's utility is split into a few commands intended to solve increasingly complex airgapped use cases.
> ⚠️ **Please Note:** Hauler and the Hauler Documentation have recently become Generally Available (GA).
__Portable self contained clusters__:
`Rancher Government Hauler` simplifies the airgap experience without requiring operators to adopt a specific workflow. **Hauler** simplifies the airgapping process, by representing assets (images, charts, files, etc...) as content and collections to allow operators to easily fetch, store, package, and distribute these assets with declarative manifests or through the command line.
Within the `hauler package` subset of commands, `Packages` (name to be finalized) can be created, updated, and ran.
`Hauler` does this by storing contents and collections as OCI Artifacts and allows operators to serve contents and collections with an embedded registry and fileserver. Additionally, `Hauler` has the ability to store and inspect various non-image OCI Artifacts.
A `Package` is a hauler specific, configurable, self-contained, compressed archive (`*.tar.zst`) that contains all dependencies needed to 1) create a kubernetes cluster, 2) deploy resources into the cluster.
```bash
# Build a minimal portable k8s cluster
hauler package build
# Build a package that deploys resources when deployed
hauler package build -p path/to/chart -p path/to/manifests -i extra/image:latest -i busybox:musl
# Build a package that deploys a cluster, oci registry, and sample app on boot
# Note the aliases introduced
hauler pkg b -p testdata/docker-registry -p testdata/rawmanifests
```
Hauler packages at their core stand on the shoulders of other technologies (`k3s`, `rke2`, and `fleet`), and as such, are designed to be extremely flexible.
Common use cases are to build turnkey, appliance-like clusters designed to boot on disconnected or low-powered devices, or portable "utility" clusters that can act as a stepping stone for further downstream deployable infrastructure. Since every `Package` is built as an entirely self-contained archive, disconnected environments are _always_ a first-class citizen.
__Image Relocation__:
For disconnected workloads that don't require a cluster to be created first, images can be efficiently packaged and relocated with `hauler relocate`.
Images are stored as a compressed archive of an `oci` layout, ensuring only the required de-duplicated image layers are packaged and transferred.
For more information, please review the **[Hauler Documentation](https://hauler.dev)!**
## Installation
Hauler is and will always be a statically compiled binary, we strongly believe in a zero dependency tool is key to reducing operational complexity in airgap environments.
Before GA, hauler can be downloaded from the releases page for every tagged release
## Dev
A `Vagrant` file is provided as a testing ground. The boot scripts at `vagrant-scripts/*.sh` will be run on boot to ensure the dev environment is airgapped.
### Linux/Darwin
```bash
vagrant up
vagrant ssh
# installs latest release
curl -sfL https://get.hauler.dev | bash
```
More info can be found in the [vagrant docs](VAGRANT.md).
## WIP Warnings
API stability (including as a code library and as a network endpoint) is NOT guaranteed before `v1` API definitions and a 1.0 release. The following recommendations are made regarding usage patterns of hauler:
- `alpha` (`v1alpha1`, `v1alpha2`, ...) API versions: use **_only_** through `haulerctl`
- `beta` (`v1beta1`, `v1beta2`, ...) API versions: use as an **_experimental_** library and/or API endpoint
- `stable` (`v1`, `v2`, ...) API versions: use as stable CLI tool, library, and/or API endpoint
### Build
### Homebrew
```bash
# Current arch build
make build
# installs latest release
brew tap hauler-dev/homebrew-tap
brew install hauler
```
# Multiarch dev build
goreleaser build --rm-dist --snapshot
```
### Windows
```bash
# coming soon
```
## Acknowledgements
`Hauler` wouldn't be possible without the open-source community, but there are a few projects that stand out:
- [oras cli](https://github.com/oras-project/oras)
- [cosign](https://github.com/sigstore/cosign)
- [go-containerregistry](https://github.com/google/go-containerregistry)

View File

@@ -1,42 +0,0 @@
# Hauler Roadmap
## v0.0.x
- Install single-node k3s cluster into an Ubuntu machine using the tarball installation method
## v0.1.0
- Install single-node k3s cluster
- Support tarball and rpm installation methods
- Target narrow set of known Operating Systems to have OS-specific code if needed
- Serve container images
- Collect images from image list file
- Collect images from image archives
- Deploy docker registry
- Populate registry with all images
- Serve git repositories
- Collect repos
- Deploy git server (Caddy? NGINX?)
- Populate git server with repos
- Serve files
- Collect files from directory, including subdirectories
- Deploy caddy file server
- Populate file server with directory contents
- NOTE: "generic" option - most other use cases can be satisfied by a specially crafted file
server directory
## Potential future features
- Helm charts
- Pull charts, migrate chart artifacts
- Analyze required container images, add to dependency list
- Yum repo
- Provide package list, collect all dependencies
- Deploy fully configured yum repo into file server
- Deploy Minio for S3 API
- MVP: backed by HA storage solution (e.g. AWS S3, Azure Blob Storage)
- Stable: backed by local storage, including backups
- Split archives into chunks of chosen size
- Enables easier transfer via physical media
- Allows smaller network transfers, losing less progress on failed upload (or working around timeouts)

View File

@@ -1,49 +0,0 @@
## Hauler Vagrant machine
A Vagrantfile is provided to allow easy provisioning of a local air-gapped CentOS environment. Some artifacts need to be collected from the internet; below are the steps required for successfully provisioning this machine, downloading all dependencies, and installing k3s (without hauler) into this machine.
### First-time setup
1. Install vagrant, if needed: <https://www.vagrantup.com/downloads>
2. Install `vagrant-vbguest` plugin, as noted in the Vagrantfile:
```shell
vagrant plugin install vagrant-vbguest
```
3. Deploy Vagrant machine, disabling SELinux:
```shell
SELINUX=Disabled vagrant up
```
4. Access the Vagrant machine via SSH:
```shell
vagrant ssh
```
5. Run all prep scripts inside of the Vagrant machine:
> This script temporarily enables internet access from within the VM to allow downloading all dependencies. Even so, the air-gapped network configuration IS restored before completion.
```shell
sudo /opt/hauler/vagrant-scripts/prep-all.sh
```
All dependencies for all `vagrant-scripts/*-install.sh` scripts are now downloaded to the local
repository under `local-artifacts`.
### Installing k3s manually
1. Access the Vagrant machine via SSH:
```bash
vagrant ssh
```
2. Run the k3s install script inside of the Vagrant machine:
```shell
sudo /opt/hauler/vagrant-scripts/k3s-install.sh
```
### Installing RKE2 manually
1. Access the Vagrant machine via SSH:
```shell
vagrant ssh
```
2. Run the RKE2 install script inside of the Vagrant machine:
```shell
sudo /opt/hauler/vagrant-scripts/rke2-install.sh
```

65
Vagrantfile vendored
View File

@@ -1,65 +0,0 @@
##################################
# The vagrant-vbguest plugin is required for CentOS 7.
# Run the following command to install/update this plugin:
# vagrant plugin install vagrant-vbguest
##################################
# Provision a CentOS 8 VM ("airgap") for local air-gapped testing.
# The "airgap" shell provisioner runs on every `vagrant up`; the "selinux"
# provisioner runs once and is driven by the SELINUX environment variable.
Vagrant.configure("2") do |config|
config.vm.box = "centos/8"
config.vm.hostname = "airgap"
# Host-only DHCP network; the airgap.sh script later restricts outside access.
config.vm.network "private_network", type: "dhcp"
config.vm.synced_folder ".", "/vagrant"
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
vb.cpus = "2"
# NOTE(review): this provisioner is declared inside the provider block but
# attaches to `config`, not `vb`; it works, though it is conventionally
# declared at the top level — confirm the placement is intentional.
config.vm.provision "airgap", type: "shell", run: "always",
inline: "/vagrant/vagrant-scripts/airgap.sh airgap"
end
# SELinux is Enforcing by default.
# To set SELinux as Disabled on a VM that has already been provisioned:
# SELINUX=Disabled vagrant up --provision-with=selinux
# To set SELinux as Permissive on a VM that has already been provisioned:
# SELINUX=Permissive vagrant up --provision-with=selinux
config.vm.provision "selinux", type: "shell", run: "once" do |sh|
sh.upload_path = "/tmp/vagrant-selinux"
sh.env = {
'SELINUX': ENV['SELINUX'] || "Enforcing"
}
# The inline script mounts/unmounts selinuxfs and toggles enforcement to
# match the requested mode; unsupported values abort provisioning.
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
if ! type -p getenforce setenforce &>/dev/null; then
echo SELinux is Disabled
exit 0
fi
case "${SELINUX}" in
Disabled)
if mountpoint -q /sys/fs/selinux; then
setenforce 0
umount -v /sys/fs/selinux
fi
;;
Enforcing)
mountpoint -q /sys/fs/selinux || mount -o rw,relatime -t selinuxfs selinuxfs /sys/fs/selinux
setenforce 1
;;
Permissive)
mountpoint -q /sys/fs/selinux || mount -o rw,relatime -t selinuxfs selinuxfs /sys/fs/selinux
setenforce 0
;;
*)
echo "SELinux mode not supported: ${SELINUX}" >&2
exit 1
;;
esac
echo SELinux is $(getenforce)
SHELL
end
end

View File

@@ -1,61 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/spf13/cobra"
)
// copyLong and copyExample document the `hauler copy` command.
// NOTE(review): "locahost" in the example looks like a typo for "localhost" —
// it is user-visible help text; confirm and fix upstream.
var (
copyLong = `hauler copies artifacts stored on a registry to local disk`
copyExample = `
# Run Hauler
hauler copy locahost:5000/artifacts:latest
`
)

// copyOpts carries the flags and arguments for the copy command.
type copyOpts struct {
*rootOpts
// dir is the target directory for copied files (flag --dir, default ".").
dir string
// sourceRef is the registry reference to copy from (first CLI argument).
sourceRef string
}
// NewCopyCommand creates the `copy` subcommand under hauler for
// copying files from an OCI registry to local disk.
func NewCopyCommand() *cobra.Command {
	opts := &copyOpts{rootOpts: &ro}

	cmd := &cobra.Command{
		Use:     "copy",
		Short:   "Download artifacts from OCI registry to local disk",
		Long:    copyLong,
		Example: copyExample,
		Aliases: []string{"c", "cp"},
		Args:    cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// First positional argument is the source registry reference.
			opts.sourceRef = args[0]
			return opts.Run(opts.sourceRef)
		},
	}

	cmd.Flags().StringVarP(&opts.dir, "dir", "d", ".", "Target directory for file copy")
	return cmd
}
// Run copies the artifact at src into o.dir, bounded by the global
// --timeout. The error is both logged and returned so the CLI exits
// non-zero on failure (previously it was swallowed and Run always
// returned nil).
func (o *copyOpts) Run(src string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := oci.Get(ctx, src, o.dir); err != nil {
		o.logger.Errorf("error copy artifact %s to local directory %s: %v", src, o.dir, err)
		return err
	}
	return nil
}

View File

@@ -1,42 +0,0 @@
package app
import (
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/spf13/cobra"
)
// ociOpts holds connection options shared by the oci subcommands.
// NOTE(review): these flags are registered on the parent command's local
// flag set and the struct is never read by the subcommands or by
// resolver() below — the flags appear unwired; confirm before relying on them.
type ociOpts struct {
insecure bool
plainHTTP bool
}
const (
// haulerMediaType is the media type hauler uses for pushed/pulled artifacts.
haulerMediaType = "application/vnd.oci.image"
)
// NewOCICommand groups the `oci push` and `oci pull` subcommands.
func NewOCICommand() *cobra.Command {
opts := ociOpts{}
cmd := &cobra.Command{
Use: "oci",
Short: "oci stuff",
// Bare `hauler oci` just prints help.
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
cmd.AddCommand(NewOCIPushCommand())
cmd.AddCommand(NewOCIPullCommand())
f := cmd.Flags()
f.BoolVarP(&opts.insecure, "insecure", "", false, "Connect to registry without certs")
f.BoolVarP(&opts.plainHTTP, "plain-http", "", false, "Connect to registry over plain http")
return cmd
}
// resolver returns a docker registry resolver.
// NOTE(review): PlainHTTP is hard-coded to true and o.plainHTTP/o.insecure
// are ignored — confirm whether the flags were meant to be honored here.
func (o *ociOpts) resolver() (remotes.Resolver, error) {
resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
return resolver, nil
}

View File

@@ -1,67 +0,0 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// ociPullOpts holds arguments for `hauler oci pull`.
type ociPullOpts struct {
ociOpts
// sourceRef is the registry reference to pull (first CLI argument).
sourceRef string
// outDir is where pulled content is written (flag --out-dir, default ".").
outDir string
}
// NewOCIPullCommand builds the `oci pull` subcommand.
func NewOCIPullCommand() *cobra.Command {
opts := ociPullOpts{}
cmd := &cobra.Command{
Use: "pull",
Short: "oci pull",
Aliases: []string{"p"},
Args: cobra.MinimumNArgs(1),
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
opts.sourceRef = args[0]
return opts.Run()
},
}
f := cmd.Flags()
f.StringVarP(&opts.outDir, "out-dir", "o", ".", "output directory")
return cmd
}
// PreRun is a placeholder hook; it currently performs no validation.
func (o *ociPullOpts) PreRun() error {
return nil
}
// Run pulls the artifact at o.sourceRef into o.outDir via ORAS, accepting
// only hauler's artifact media type. The original ignored the error from
// oras.Pull and logged a (possibly zero-valued) descriptor even on failure;
// the error is now checked and returned.
func (o *ociPullOpts) Run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	store := content.NewFileStore(o.outDir)
	defer store.Close()

	allowedMediaTypes := []string{
		haulerMediaType,
	}

	resolver, err := o.resolver()
	if err != nil {
		return err
	}

	desc, _, err := oras.Pull(ctx, resolver, o.sourceRef, store, oras.WithAllowedMediaTypes(allowedMediaTypes))
	if err != nil {
		return err
	}
	logrus.Infof("pulled %s with digest: %s", o.sourceRef, desc.Digest)
	return nil
}

View File

@@ -1,74 +0,0 @@
package app
import (
"context"
"github.com/deislabs/oras/pkg/content"
"github.com/deislabs/oras/pkg/oras"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
)
// ociPushOpts holds arguments for `hauler oci push`.
type ociPushOpts struct {
ociOpts
// targetRef is the destination registry reference (second CLI argument).
targetRef string
// pathRef is the local file to push (first CLI argument).
pathRef string
}
// NewOCIPushCommand builds the `oci push` subcommand.
func NewOCIPushCommand() *cobra.Command {
opts := ociPushOpts{}
cmd := &cobra.Command{
Use: "push",
Short: "oci push",
Aliases: []string{"p"},
Args: cobra.MinimumNArgs(2),
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
// args: <local file> <target reference>
opts.pathRef = args[0]
opts.targetRef = args[1]
return opts.Run()
},
}
return cmd
}
// PreRun is a placeholder hook; it currently performs no validation.
func (o *ociPushOpts) PreRun() error {
return nil
}
// Run reads the file at o.pathRef into memory and pushes it to
// o.targetRef as a single-descriptor ORAS artifact, logging the
// resulting digest on success.
func (o *ociPushOpts) Run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fileData, err := os.ReadFile(o.pathRef)
	if err != nil {
		return err
	}

	res, err := o.resolver()
	if err != nil {
		return err
	}

	// Stage the file contents in an in-memory store under hauler's media type.
	memStore := content.NewMemoryStore()
	descs := []ocispec.Descriptor{
		memStore.Add(o.pathRef, haulerMediaType, fileData),
	}

	pushed, err := oras.Push(ctx, res, o.targetRef, memStore, descs)
	if err != nil {
		return err
	}
	logrus.Infof("pushed %s to %s with digest: %s", o.pathRef, o.targetRef, pushed.Digest)
	return nil
}

View File

@@ -1,25 +0,0 @@
package app
import "github.com/spf13/cobra"
// pkgOpts reserves a place for future `pkg` flags; currently empty.
type pkgOpts struct{}
// NewPkgCommand groups the `pkg build` and `pkg run` subcommands.
func NewPkgCommand() *cobra.Command {
opts := &pkgOpts{}
//TODO
_ = opts
cmd := &cobra.Command{
Use: "pkg",
Short: "Interact with packages",
Aliases: []string{"p", "package"},
// Bare `hauler pkg` just prints help.
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
cmd.AddCommand(NewPkgBuildCommand())
cmd.AddCommand(NewPkgRunCommand())
return cmd
}

View File

@@ -1,202 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
"sigs.k8s.io/yaml"
)
// pkgBuildOpts holds the flags for `hauler pkg build`.
type pkgBuildOpts struct {
*rootOpts
cfgFile string // optional package config file path (--config)
name string // package/archive name (--name)
dir string // working directory; empty means an ephemeral temp dir (--directory)
driver string // cluster driver type, e.g. "k3s" (--driver)
driverVersion string // driver version (--driver-version)
fleetVersion string // fleet version (--fleet-version)
images []string // extra images to package (--image)
paths []string // manifest/chart paths to package (--path)
}
// NewPkgBuildCommand builds the `pkg build` subcommand, binding every
// pkgBuildOpts field to a persistent flag.
func NewPkgBuildCommand() *cobra.Command {
opts := pkgBuildOpts{
rootOpts: &ro,
}
cmd := &cobra.Command{
Use: "build",
Short: "Build a self contained compressed archive of manifests and images",
Long: `
Compressed archives created with this command can be extracted and run anywhere the underlying 'driver' can be run.
Archives are built by collecting all the dependencies (images and manifests) required.
Examples:
# Build a package containing a helm chart with images autodetected from the generated helm chart
hauler package build -p path/to/helm/chart
# Build a package, sourcing from multiple manifest sources and additional images not autodetected
hauler pkg build -p path/to/raw/manifests -p path/to/kustomize -i busybox:latest -i busybox:musl
# Build a package using a different version of k3s
hauler p build -p path/to/chart --driver-version "v1.20.6+k3s1"
# Build a package from a config file (if ./pkg.yaml does not exist, one will be created)
hauler package build -c ./pkg.yaml
`,
Aliases: []string{"b"},
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
return opts.Run()
},
}
f := cmd.PersistentFlags()
f.StringVarP(&opts.name, "name", "n", "pkg",
"name of the pkg to create, will dicate file name")
f.StringVarP(&opts.cfgFile, "config", "c", "",
"path to config file")
f.StringVar(&opts.dir, "directory", "",
"Working directory for building package, if empty, an ephemeral temporary directory will be used. Set this to persist package artifacts between builds.")
// NOTE(review): the flags below have empty help strings — fill them in.
f.StringVarP(&opts.driver, "driver", "d", "k3s",
"")
f.StringVar(&opts.driverVersion, "driver-version", "v1.21.1+k3s1",
"")
f.StringVar(&opts.fleetVersion, "fleet-version", "v0.3.5",
"")
f.StringSliceVarP(&opts.paths, "path", "p", []string{},
"")
f.StringSliceVarP(&opts.images, "image", "i", []string{},
"")
return cmd
}
// PreRun seeds a default config file when --config points at a path that
// does not exist yet. An empty --config means "no config file" and is a no-op.
func (o *pkgBuildOpts) PreRun() error {
_, err := os.Stat(o.cfgFile)
if os.IsNotExist(err) {
// os.Stat("") also reports not-exist; bail out before writing anything.
if o.cfgFile == "" {
return nil
}
o.logger.Warnf("Did not find an existing %s, creating one", o.cfgFile)
// Materialize the current CLI flag values as a starter config.
p := o.toPackage()
data, err := yaml.Marshal(p)
if err != nil {
return err
}
if err := os.WriteFile(o.cfgFile, data, 0644); err != nil {
return err
}
} else if err != nil {
return err
}
return nil
}
// Run builds a package archive: it loads (or synthesizes from flags) the
// package definition, stages bundles, images, the driver, and fleet into a
// working directory, then archives the result under o.name.
//
// Fix: the working-directory error log used the format string
// "%s\n%v" with only one argument, producing a vet error and a
// "%!v(MISSING)" in output; it now uses a single %v verb.
func (o *pkgBuildOpts) Run() error {
	o.logger.Infof("Building package")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Prefer an explicit config file; otherwise build the spec from CLI flags.
	var p v1alpha1.Package
	if o.cfgFile != "" {
		o.logger.Infof("Config file '%s' specified, attempting to load existing package config", o.cfgFile)
		cfgData, err := os.ReadFile(o.cfgFile)
		if err != nil {
			return err
		}
		if err := yaml.Unmarshal(cfgData, &p); err != nil {
			return err
		}
	} else {
		o.logger.Infof("No config file specified, strictly using cli arguments")
		p = o.toPackage()
	}

	// Pick the working directory: user-specified (persisted between builds)
	// or an ephemeral temp dir removed when Run returns.
	var wdir string
	if o.dir != "" {
		if _, err := os.Stat(o.dir); err != nil {
			o.logger.Errorf("Failed to use specified working directory: %v", err)
			return err
		}
		wdir = o.dir
	} else {
		tmpdir, err := os.MkdirTemp("", "hauler")
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpdir)
		wdir = tmpdir
	}

	pkgr := packager.NewPackager(wdir, o.logger)
	d := driver.NewDriver(p.Spec.Driver)

	// Stage each content type in turn; abort on the first failure.
	if _, bErr := pkgr.PackageBundles(ctx, p.Spec.Paths...); bErr != nil {
		return bErr
	}
	if iErr := pkgr.PackageImages(ctx, o.images...); iErr != nil {
		return iErr
	}
	if dErr := pkgr.PackageDriver(ctx, d); dErr != nil {
		return dErr
	}
	if fErr := pkgr.PackageFleet(ctx, p.Spec.Fleet); fErr != nil {
		return fErr
	}

	a := packager.NewArchiver()
	if aErr := pkgr.Archive(a, p, o.name); aErr != nil {
		return aErr
	}
	o.logger.Successf("Finished building package")
	return nil
}
// toPackage assembles a v1alpha1.Package from the current CLI flag values.
// TypeMeta is left at its zero value, matching the original behavior.
func (o *pkgBuildOpts) toPackage() v1alpha1.Package {
	spec := v1alpha1.PackageSpec{
		Fleet: v1alpha1.Fleet{
			Version: o.fleetVersion,
		},
		Driver: v1alpha1.Driver{
			Type:    o.driver,
			Version: o.driverVersion,
		},
		Paths:  o.paths,
		Images: o.images,
	}
	return v1alpha1.Package{
		ObjectMeta: metav1.ObjectMeta{
			Name: o.name,
		},
		Spec: spec,
	}
}

View File

@@ -1,84 +0,0 @@
package app
import (
"os"
"testing"
)
// Test_pkgBuildOpts_Run is an integration-style test: it drives PreRun
// (which creates pkg.yaml) and Run (which builds a real package) against
// local testdata paths. NOTE(review): Run fetches driver/fleet assets, so
// this presumably needs network access — confirm how it is gated in CI.
func Test_pkgBuildOpts_Run(t *testing.T) {
l, _ := setupCliLogger(os.Stdout, "debug")
tro := rootOpts{l}
// fields mirrors pkgBuildOpts so cases can be declared as plain literals.
type fields struct {
rootOpts *rootOpts
cfgFile string
name string
driver string
driverVersion string
fleetVersion string
images []string
paths []string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "should package all types of local manifests",
fields: fields{
rootOpts: &tro,
cfgFile: "pkg.yaml",
name: "k3s",
driver: "k3s",
driverVersion: "v1.21.1+k3s1",
fleetVersion: "v0.3.5",
images: nil,
paths: []string{
"../../../testdata/docker-registry",
"../../../testdata/rawmanifests",
},
},
wantErr: false,
},
{
name: "should package using fleet.yaml",
fields: fields{
rootOpts: &tro,
cfgFile: "pkg.yaml",
name: "k3s",
driver: "k3s",
driverVersion: "v1.21.1+k3s1",
fleetVersion: "v0.3.5",
images: nil,
paths: []string{
"../../../testdata/custom",
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
o := &pkgBuildOpts{
rootOpts: tt.fields.rootOpts,
cfgFile: tt.fields.cfgFile,
name: tt.fields.name,
driver: tt.fields.driver,
driverVersion: tt.fields.driverVersion,
fleetVersion: tt.fields.fleetVersion,
images: tt.fields.images,
paths: tt.fields.paths,
}
// PreRun creates the config file; remove it when the subtest ends.
if err := o.PreRun(); err != nil {
t.Errorf("PreRun() error = %v", err)
}
defer os.Remove(o.cfgFile)
if err := o.Run(); (err != nil) != tt.wantErr {
t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -1,91 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/bootstrap"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
"os"
)
// pkgRunOpts holds the flags for `hauler pkg run`.
type pkgRunOpts struct {
*rootOpts
// cfgFile is declared but not currently bound to any flag.
cfgFile string
}
// NewPkgRunCommand builds the `pkg run` subcommand, which boots a cluster
// from a package archive produced by `pkg build`.
func NewPkgRunCommand() *cobra.Command {
opts := pkgRunOpts{
rootOpts: &ro,
}
cmd := &cobra.Command{
Use: "run",
Short: "Run a compressed archive",
Long: `
Run a compressed archive created from a 'hauler package build'.
Examples:
# Run a package
hauler package run pkg.tar.zst
`,
Aliases: []string{"r"},
Args: cobra.MinimumNArgs(1),
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.PreRun()
},
RunE: func(cmd *cobra.Command, args []string) error {
// First positional argument is the package archive path.
return opts.Run(args[0])
},
}
return cmd
}
// PreRun is a placeholder hook; it currently performs no validation.
func (o *pkgRunOpts) PreRun() error {
return nil
}
// Run unpacks the package archive at pkgPath into a temp directory and
// boots a cluster from it via the configured driver, running the
// PreBoot/Boot/PostBoot sequence in order.
func (o *pkgRunOpts) Run(pkgPath string) error {
o.logger.Infof("Running from '%s'", pkgPath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tmpdir, err := os.MkdirTemp("", "hauler")
if err != nil {
return err
}
// NOTE(review): tmpdir is never removed. That may be deliberate if the
// booted cluster keeps using files under it — confirm before adding cleanup.
o.logger.Debugf("Using temporary working directory: %s", tmpdir)
a := packager.NewArchiver()
if err := packager.Unpackage(a, pkgPath, tmpdir); err != nil {
return err
}
o.logger.Debugf("Unpackaged %s", pkgPath)
// The booter reads the package definition out of the unpacked tree.
b, err := bootstrap.NewBooter(tmpdir, o.logger)
if err != nil {
return err
}
d := driver.NewDriver(b.Package.Spec.Driver)
if preErr := b.PreBoot(ctx, d); preErr != nil {
return preErr
}
if bErr := b.Boot(ctx, d); bErr != nil {
return bErr
}
if postErr := b.PostBoot(ctx, d); postErr != nil {
return postErr
}
o.logger.Successf("Access the cluster with '/opt/hauler/bin/kubectl'")
return nil
}

View File

@@ -1,33 +0,0 @@
package app
import (
"github.com/spf13/cobra"
)
// relocateOpts holds state shared by the relocate subcommands.
type relocateOpts struct {
// inputFile is the archive to relocate from (set by each subcommand).
inputFile string
*rootOpts
}
// NewRelocateCommand creates a new sub command under
// haulterctl for relocating images and artifacts
func NewRelocateCommand() *cobra.Command {
opts := &relocateOpts{
rootOpts: &ro,
}
cmd := &cobra.Command{
Use: "relocate",
Short: "relocate images or artifacts to a registry",
Long: "",
Aliases: []string{"r"},
// Bare `hauler relocate` just prints help.
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
cmd.AddCommand(NewRelocateArtifactsCommand(opts))
cmd.AddCommand(NewRelocateImagesCommand(opts))
return cmd
}

View File

@@ -1,56 +0,0 @@
package app
import (
"context"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/spf13/cobra"
)
// relocateArtifactsOpts holds arguments for `hauler relocate artifacts`.
type relocateArtifactsOpts struct {
*relocateOpts
// destRef is the destination registry reference (second CLI argument).
destRef string
}
// Long/example help text for `hauler relocate artifacts`.
// NOTE(review): "locahost" in the example looks like a typo for "localhost".
var (
relocateArtifactsLong = `hauler relocate artifacts process an archive with files to be pushed to a registry`
relocateArtifactsExample = `
# Run Hauler
hauler relocate artifacts artifacts.tar.zst locahost:5000/artifacts:latest
`
)
// NewRelocateArtifactsCommand creates a new sub command of relocate for artifacts
func NewRelocateArtifactsCommand(relocate *relocateOpts) *cobra.Command {
opts := &relocateArtifactsOpts{
relocateOpts: relocate,
}
cmd := &cobra.Command{
Use: "artifacts",
Short: "Use artifact from bundle artifacts to populate a target file server with the artifact's contents",
Long: relocateArtifactsLong,
Example: relocateArtifactsExample,
Args: cobra.MinimumNArgs(2),
Aliases: []string{"a", "art", "af"},
RunE: func(cmd *cobra.Command, args []string) error {
// args: <input archive> <destination reference>
opts.inputFile = args[0]
opts.destRef = args[1]
return opts.Run(opts.destRef, opts.inputFile)
},
}
return cmd
}
// Run pushes the artifact archive at input to the registry reference dst,
// bounded by the global --timeout. The error is both logged and returned
// so the CLI exits non-zero on failure (previously it was swallowed and
// Run always returned nil).
func (o *relocateArtifactsOpts) Run(dst string, input string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := oci.Put(ctx, input, dst); err != nil {
		o.logger.Errorf("error pushing artifact to registry %s: %v", dst, err)
		return err
	}
	return nil
}

View File

@@ -1,103 +0,0 @@
package app
import (
"os"
"path/filepath"
"strings"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancherfederal/hauler/pkg/oci"
"github.com/rancherfederal/hauler/pkg/packager"
"github.com/spf13/cobra"
)
// Long/example help text for `hauler relocate images`.
// NOTE(review): "locahost" in the example looks like a typo for "localhost".
var (
relocateImagesLong = `hauler relocate images processes a bundle provides by hauler package build and copies all of
the collected images to a registry`
relocateImagesExample = `
# Run Hauler
hauler relocate images pkg.tar.zst locahost:5000
`
)
// relocateImagesOpts holds arguments for `hauler relocate images`.
type relocateImagesOpts struct {
*relocateOpts
// destRef is the destination registry (second CLI argument).
destRef string
}
// NewRelocateImagesCommand creates a new sub command of relocate for images
func NewRelocateImagesCommand(relocate *relocateOpts) *cobra.Command {
opts := &relocateImagesOpts{
relocateOpts: relocate,
}
cmd := &cobra.Command{
Use: "images",
Short: "Use artifact from bundle images to populate a target registry with the artifact's images",
Long: relocateImagesLong,
Example: relocateImagesExample,
Args: cobra.MinimumNArgs(2),
Aliases: []string{"i", "img", "imgs"},
RunE: func(cmd *cobra.Command, args []string) error {
// args: <input archive> <destination registry>
opts.inputFile = args[0]
opts.destRef = args[1]
return opts.Run(opts.destRef, opts.inputFile)
},
}
return cmd
}
// Run unpacks the package archive at input, opens the OCI layout inside it,
// and copies every image it lists to the registry dst.
//
// Fixes over the original: every failure is now returned (previously each
// error was only logged and execution continued, eventually dereferencing
// nil values from the failed calls); image names without a registry prefix
// no longer cause an index-out-of-range panic; and the temporary working
// directory is removed when Run returns.
func (o *relocateImagesOpts) Run(dst string, input string) error {
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)
	o.logger.Debugf("Using temporary working directory: %s", tmpdir)

	a := packager.NewArchiver()
	if err := packager.Unpackage(a, input, tmpdir); err != nil {
		o.logger.Errorf("error unpackaging input %s: %v", input, err)
		return err
	}
	o.logger.Debugf("Unpackaged %s", input)

	path := filepath.Join(tmpdir, "layout")
	ly, err := layout.FromPath(path)
	if err != nil {
		o.logger.Errorf("error creating OCI layout: %v", err)
		return err
	}

	for nm, hash := range oci.ListImages(ly) {
		// Strip the source registry prefix; keep the repository/tag part.
		parts := strings.SplitN(nm, "/", 2)
		if len(parts) < 2 {
			o.logger.Errorf("unexpected image name %q: missing registry prefix", nm)
			continue
		}
		img, err := ly.Image(hash)
		if err != nil {
			o.logger.Errorf("error creating image from layout: %v", err)
			return err
		}
		o.logger.Infof("Copy %s to %s", parts[1], dst)
		dstimg := dst + "/" + parts[1]
		tag, err := name.ParseReference(dstimg)
		if err != nil {
			o.logger.Errorf("err parsing destination image %s: %v", dstimg, err)
			return err
		}
		if err := remote.Write(tag, img); err != nil {
			o.logger.Errorf("error writing image to destination registry %s: %v", dst, err)
			return err
		}
	}
	return nil
}

View File

@@ -1,81 +0,0 @@
package app
import (
"io"
"os"
"time"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/spf13/cobra"
)
// Package-level CLI state; loglevel and timeout are bound as persistent
// flags in NewRootCommand.
var (
loglevel string
timeout time.Duration
getLong = `hauler provides CLI-based air-gap migration assistance using k3s.
Choose your functionality and new a package when internet access is available,
then deploy the package into your air-gapped environment.
`
getExample = `
hauler pkg build
hauler pkg run pkg.tar.zst
hauler relocate artifacts artifacts.tar.zst
hauler relocate images pkg.tar.zst locahost:5000
hauler copy localhost:5000/artifacts:latest
`
)
// rootOpts carries state shared by all subcommands (currently the logger).
type rootOpts struct {
logger log.Logger
}
// ro is the shared options instance populated in PersistentPreRunE.
var ro rootOpts
// NewRootCommand defines the root hauler command.
func NewRootCommand() *cobra.Command {
	root := &cobra.Command{
		Use:          "hauler",
		Short:        "hauler provides CLI-based air-gap migration assistance",
		Long:         getLong,
		Example:      getExample,
		SilenceUsage: true,
		// Build the shared logger before any subcommand runs.
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			logger, err := setupCliLogger(os.Stdout, loglevel)
			if err != nil {
				return err
			}
			ro.logger = logger
			return nil
		},
		// Bare `hauler` prints usage.
		RunE: func(cmd *cobra.Command, _ []string) error {
			return cmd.Help()
		},
	}
	cobra.OnInitialize()

	pf := root.PersistentFlags()
	pf.StringVarP(&loglevel, "loglevel", "l", "debug",
		"Log level (debug, info, warn, error, fatal, panic)")
	pf.DurationVar(&timeout, "timeout", 1*time.Minute,
		"TODO: timeout for operations")

	root.AddCommand(NewRelocateCommand())
	root.AddCommand(NewCopyCommand())
	root.AddCommand(NewPkgCommand())
	return root
}
// setupCliLogger constructs the CLI logger writing to out. The level
// argument is accepted for interface stability but is not consumed by
// log.NewLogger here.
func setupCliLogger(out io.Writer, level string) (log.Logger, error) {
	return log.NewLogger(out), nil
}

40
cmd/hauler/cli/cli.go Normal file
View File

@@ -0,0 +1,40 @@
package cli
import (
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
)
// rootOpts holds persistent flags shared by every hauler subcommand.
type rootOpts struct {
logLevel string
}
// ro is the shared flag storage bound in New.
var ro = &rootOpts{}
// New builds the root `hauler` command and wires up all subcommands.
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "hauler",
Short: "Airgap Swiss Army Knife",
// Configure the context logger's level before any subcommand runs.
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
l := log.FromContext(cmd.Context())
l.SetLevel(ro.logLevel)
l.Debugf("running cli command [%s]", cmd.CommandPath())
return nil
},
// Bare `hauler` prints help.
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
pf := cmd.PersistentFlags()
pf.StringVarP(&ro.logLevel, "log-level", "l", "info", "")
// Add subcommands
addLogin(cmd)
addStore(cmd)
addVersion(cmd)
addCompletion(cmd)
return cmd
}

View File

@@ -0,0 +1,124 @@
package cli
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// addCompletion registers `completion` with one subcommand per shell.
func addCompletion(parent *cobra.Command) {
cmd := &cobra.Command{
Use: "completion",
Short: "Generates completion scripts for various shells",
Long: `The completion sub-command generates completion scripts for various shells.`,
}
cmd.AddCommand(
addCompletionZsh(),
addCompletionBash(),
addCompletionFish(),
addCompletionPowershell(),
)
parent.AddCommand(cmd)
}
// completionError reports err through cobra's completion logging and
// signals the shell that completion failed.
func completionError(err error) ([]string, cobra.ShellCompDirective) {
cobra.CompError(err.Error())
return nil, cobra.ShellCompDirectiveError
}
// addCompletionZsh emits the zsh completion script on stdout.
func addCompletionZsh() *cobra.Command {
cmd := &cobra.Command{
Use: "zsh",
Short: "Generates zsh completion scripts",
Long: `The completion sub-command generates completion scripts for zsh.`,
Example: `To load completion run
. <(hauler completion zsh)
To configure your zsh shell to load completions for each session add to your zshrc
# ~/.zshrc or ~/.profile
command -v hauler >/dev/null && . <(hauler completion zsh)
or write a cached file in one of the completion directories in your ${fpath}:
echo "${fpath// /\n}" | grep -i completion
hauler completion zsh > _hauler
mv _hauler ~/.oh-my-zsh/completions # oh-my-zsh
mv _hauler ~/.zprezto/modules/completion/external/src/ # zprezto`,
Run: func(cmd *cobra.Command, args []string) {
cmd.GenZshCompletion(os.Stdout)
// Cobra doesn't source zsh completion file, explicitly doing it here
fmt.Println("compdef _hauler hauler")
},
}
return cmd
}
// addCompletionBash emits the bash completion script on stdout.
func addCompletionBash() *cobra.Command {
cmd := &cobra.Command{
Use: "bash",
Short: "Generates bash completion scripts",
Long: `The completion sub-command generates completion scripts for bash.`,
Example: `To load completion run
. <(hauler completion bash)
To configure your bash shell to load completions for each session add to your bashrc
# ~/.bashrc or ~/.profile
command -v hauler >/dev/null && . <(hauler completion bash)`,
Run: func(cmd *cobra.Command, args []string) {
cmd.GenBashCompletion(os.Stdout)
},
}
return cmd
}
// addCompletionFish emits the fish completion script on stdout.
func addCompletionFish() *cobra.Command {
cmd := &cobra.Command{
Use: "fish",
Short: "Generates fish completion scripts",
Long: `The completion sub-command generates completion scripts for fish.`,
Example: `To configure your fish shell to load completions for each session write this script to your completions dir:
hauler completion fish > ~/.config/fish/completions/hauler.fish
See http://fishshell.com/docs/current/index.html#completion-own for more details`,
Run: func(cmd *cobra.Command, args []string) {
cmd.GenFishCompletion(os.Stdout, true)
},
}
return cmd
}
// addCompletionPowershell emits the powershell completion script on stdout.
func addCompletionPowershell() *cobra.Command {
cmd := &cobra.Command{
Use: "powershell",
Short: "Generates powershell completion scripts",
Long: `The completion sub-command generates completion scripts for powershell.`,
Example: `To load completion run
. <(hauler completion powershell)
To configure your powershell shell to load completions for each session add to your powershell profile
Windows:
cd "$env:USERPROFILE\Documents\WindowsPowerShell\Modules"
hauler completion powershell >> hauler-completion.ps1
Linux:
cd "${XDG_CONFIG_HOME:-"$HOME/.config/"}/powershell/modules"
hauler completion powershell >> hauler-completions.ps1`,
Run: func(cmd *cobra.Command, args []string) {
cmd.GenPowerShellCompletion(os.Stdout)
},
}
return cmd
}

75
cmd/hauler/cli/login.go Normal file
View File

@@ -0,0 +1,75 @@
package cli
import (
"context"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
"oras.land/oras-go/pkg/content"
"github.com/rancherfederal/hauler/pkg/cosign"
)
// Opts holds the credentials for `hauler login`.
type Opts struct {
Username string
Password string
// PasswordStdin reads the password from stdin instead of the -p flag.
PasswordStdin bool
}
// AddArgs binds the login flags to cmd.
func (o *Opts) AddArgs(cmd *cobra.Command) {
f := cmd.Flags()
f.StringVarP(&o.Username, "username", "u", "", "Username")
f.StringVarP(&o.Password, "password", "p", "", "Password")
f.BoolVarP(&o.PasswordStdin, "password-stdin", "", false, "Take the password from stdin")
}
// addLogin registers the `login` subcommand on parent.
func addLogin(parent *cobra.Command) {
o := &Opts{}
cmd := &cobra.Command{
Use: "login",
Short: "Log in to a registry",
Example: `
# Log in to reg.example.com
hauler login reg.example.com -u bob -p haulin`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, arg []string) error {
ctx := cmd.Context()
// --password-stdin overrides -p; trailing newline/CR are stripped.
if o.PasswordStdin {
contents, err := io.ReadAll(os.Stdin)
if err != nil {
return err
}
o.Password = strings.TrimSuffix(string(contents), "\n")
o.Password = strings.TrimSuffix(o.Password, "\r")
}
// NOTE(review): this rejects only when BOTH fields are empty, so a
// lone username (or lone password) passes — confirm that is intended.
if o.Username == "" && o.Password == "" {
return fmt.Errorf("username and password required")
}
return login(ctx, o, arg[0])
},
}
o.AddArgs(cmd)
parent.AddCommand(cmd)
}
// login authenticates against registry with the credentials carried in o,
// delegating to cosign's registry login helper.
func login(ctx context.Context, o *Opts, registry string) error {
	credentials := content.RegistryOptions{
		Username: o.Username,
		Password: o.Password,
	}
	return cosign.RegistryLogin(ctx, nil, registry, credentials)
}

352
cmd/hauler/cli/store.go Normal file
View File

@@ -0,0 +1,352 @@
package cli
import (
"fmt"
"github.com/spf13/cobra"
"helm.sh/helm/v3/pkg/action"
"github.com/rancherfederal/hauler/cmd/hauler/cli/store"
)
// rootStoreOpts is shared by every store subcommand.
var rootStoreOpts = &store.RootOpts{}
// addStore registers the `store` command tree on parent.
func addStore(parent *cobra.Command) {
cmd := &cobra.Command{
Use: "store",
Aliases: []string{"s"},
Short: "Interact with hauler's embedded content store",
// Bare `hauler store` just prints help.
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
rootStoreOpts.AddArgs(cmd)
cmd.AddCommand(
addStoreSync(),
addStoreExtract(),
addStoreLoad(),
addStoreSave(),
addStoreServe(),
addStoreInfo(),
addStoreCopy(),
// TODO: Remove this in favor of sync?
addStoreAdd(),
)
parent.AddCommand(cmd)
}
// addStoreExtract builds `store extract`: copy one piece of store content to disk.
func addStoreExtract() *cobra.Command {
o := &store.ExtractOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "extract",
Short: "Extract content from the store to disk",
Aliases: []string{"x"},
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.ExtractCmd(ctx, o, s, args[0])
},
}
// NOTE(review): extract uses AddArgs while sync/load use AddFlags —
// presumably equivalent binders; confirm the naming split is intentional.
o.AddArgs(cmd)
return cmd
}
// addStoreSync builds `store sync`: pull declared content into the store.
func addStoreSync() *cobra.Command {
o := &store.SyncOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "sync",
Short: "Sync content to the embedded content store",
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.SyncCmd(ctx, o, s)
},
}
o.AddFlags(cmd)
return cmd
}
// addStoreLoad builds `store load`: import one or more store archives.
func addStoreLoad() *cobra.Command {
o := &store.LoadOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "load",
Short: "Load a content store from a store archive",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
// Opening the store validates/initializes it; the handle itself is
// unused by LoadCmd.
s, err := o.Store(ctx)
if err != nil {
return err
}
_ = s
return store.LoadCmd(ctx, o, args...)
},
}
o.AddFlags(cmd)
return cmd
}
// addStoreServe groups the registry and fileserver serving modes.
func addStoreServe() *cobra.Command {
cmd := &cobra.Command{
Use: "serve",
Short: "Expose the content of a local store through an OCI compliant registry or file server",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
cmd.AddCommand(
addStoreServeRegistry(),
addStoreServeFiles(),
)
return cmd
}
// RegistryCmd serves the embedded registry
func addStoreServeRegistry() *cobra.Command {
o := &store.ServeRegistryOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "registry",
Short: "Serve the embedded registry",
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.ServeRegistryCmd(ctx, o, s)
},
}
o.AddFlags(cmd)
return cmd
}
// FileServerCmd serves the file server
func addStoreServeFiles() *cobra.Command {
o := &store.ServeFilesOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "fileserver",
Short: "Serve the file server",
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.ServeFilesCmd(ctx, o, s)
},
}
o.AddFlags(cmd)
return cmd
}
func addStoreSave() *cobra.Command {
o := &store.SaveOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "save",
Short: "Save a content store to a store archive",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
_ = s
return store.SaveCmd(ctx, o, o.FileName)
},
}
o.AddArgs(cmd)
return cmd
}
func addStoreInfo() *cobra.Command {
o := &store.InfoOpts{RootOpts: rootStoreOpts}
var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "all"}
cmd := &cobra.Command{
Use: "info",
Short: "Print out information about the store",
Args: cobra.ExactArgs(0),
Aliases: []string{"i", "list", "ls"},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
for _, allowed := range allowedValues {
if o.TypeFilter == allowed {
return store.InfoCmd(ctx, o, s)
}
}
return fmt.Errorf("type must be one of %v", allowedValues)
},
}
o.AddFlags(cmd)
return cmd
}
func addStoreCopy() *cobra.Command {
o := &store.CopyOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "copy",
Short: "Copy all store contents to another OCI registry",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.CopyCmd(ctx, o, s, args[0])
},
}
o.AddFlags(cmd)
return cmd
}
func addStoreAdd() *cobra.Command {
cmd := &cobra.Command{
Use: "add",
Short: "Add content to store",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
cmd.AddCommand(
addStoreAddFile(),
addStoreAddImage(),
addStoreAddChart(),
)
return cmd
}
func addStoreAddFile() *cobra.Command {
o := &store.AddFileOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "file",
Short: "Add a file to the content store",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.AddFileCmd(ctx, o, s, args[0])
},
}
o.AddFlags(cmd)
return cmd
}
func addStoreAddImage() *cobra.Command {
o := &store.AddImageOpts{RootOpts: rootStoreOpts}
cmd := &cobra.Command{
Use: "image",
Short: "Add an image to the content store",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
s, err := o.Store(ctx)
if err != nil {
return err
}
return store.AddImageCmd(ctx, o, s, args[0])
},
}
o.AddFlags(cmd)
return cmd
}
// addStoreAddChart builds the `store add chart` subcommand, supporting both
// local chart paths and remote repository lookups.
func addStoreAddChart() *cobra.Command {
	opts := &store.AddChartOpts{
		RootOpts:  rootStoreOpts,
		ChartOpts: &action.ChartPathOptions{},
	}

	cmd := &cobra.Command{
		Use:   "chart",
		Short: "Add a local or remote chart to the content store",
		Example: `
# add a local chart
hauler store add chart path/to/chart/directory
# add a local compressed chart
hauler store add chart path/to/chart.tar.gz
# add a remote chart
hauler store add chart longhorn --repo "https://charts.longhorn.io"
# add a specific version of a chart
hauler store add chart rancher --repo "https://releases.rancher.com/server-charts/latest" --version "2.6.2"
`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()

			s, err := opts.Store(ctx)
			if err != nil {
				return err
			}
			return store.AddChartCmd(ctx, opts, s, args[0])
		},
	}
	opts.AddFlags(cmd)

	return cmd
}

174
cmd/hauler/cli/store/add.go Normal file
View File

@@ -0,0 +1,174 @@
package store
import (
"context"
"github.com/google/go-containerregistry/pkg/name"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
"github.com/spf13/cobra"
"helm.sh/helm/v3/pkg/action"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/artifacts/file"
"github.com/rancherfederal/hauler/pkg/content/chart"
"github.com/rancherfederal/hauler/pkg/cosign"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)
// AddFileOpts holds the options for the `store add file` subcommand.
type AddFileOpts struct {
	*RootOpts

	// Name optionally overrides the name the file is stored under.
	Name string
}

// AddFlags registers the file-specific flags on cmd.
func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.Name, "name", "n", "", "(Optional) Name to assign to file in store")
}
// AddFileCmd adds the file at reference to the store, optionally under a
// caller-supplied name.
func AddFileCmd(ctx context.Context, o *AddFileOpts, s *store.Layout, reference string) error {
	cfg := v1alpha1.File{Path: reference}
	if o.Name != "" {
		cfg.Name = o.Name
	}
	return storeFile(ctx, s, cfg)
}
// storeFile resolves fi with the file getter client and records it in the
// store under the default tag.
func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
	l := log.FromContext(ctx)

	client := getter.NewClient(getter.ClientOptions{NameOverride: fi.Name})
	f := file.NewFile(fi.Path, file.WithClient(client))

	ref, err := reference.NewTagged(f.Name(fi.Path), reference.DefaultTag)
	if err != nil {
		return err
	}

	l.Infof("adding 'file' [%s] to the store as [%s]", fi.Path, ref.Name())
	if _, err := s.AddOCI(ctx, f, ref.Name()); err != nil {
		return err
	}

	l.Infof("successfully added 'file' [%s]", ref.Name())
	return nil
}
// AddImageOpts holds the options for the `store add image` subcommand.
type AddImageOpts struct {
	*RootOpts

	Name     string
	Key      string // path to a public key used for signature verification
	Platform string // e.g. linux/amd64; empty means all platforms
}

// AddFlags registers the image-specific flags on cmd.
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for digital signature verification")
	flags.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specific platform to save. i.e. linux/amd64. Defaults to all if flag is omitted.")
}
// AddImageCmd adds the image at reference to the store, first verifying its
// signature when a verification key was supplied.
func AddImageCmd(ctx context.Context, o *AddImageOpts, s *store.Layout, reference string) error {
	l := log.FromContext(ctx)
	cfg := v1alpha1.Image{Name: reference}

	// A non-empty key means the caller requested signature verification
	// before the image is admitted to the store.
	if o.Key != "" {
		if err := cosign.VerifySignature(ctx, s, o.Key, cfg.Name); err != nil {
			return err
		}
		l.Infof("signature verified for image [%s]", cfg.Name)
	}

	return storeImage(ctx, s, cfg, o.Platform)
}
// storeImage pulls the image named by i into the store via cosign,
// optionally restricted to a single platform.
func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image, platform string) error {
	l := log.FromContext(ctx)
	l.Infof("adding 'image' [%s] to the store", i.Name)

	r, err := name.ParseReference(i.Name)
	if err != nil {
		return err
	}

	if err := cosign.SaveImage(ctx, s, r.Name(), platform); err != nil {
		return err
	}

	l.Infof("successfully added 'image' [%s]", r.Name())
	return nil
}
// AddChartOpts holds the options for the `store add chart` subcommand; the
// repository-related settings are passed through to Helm.
type AddChartOpts struct {
	*RootOpts

	ChartOpts *action.ChartPathOptions
}

// AddFlags registers the chart-specific flags on cmd.
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVar(&o.ChartOpts.RepoURL, "repo", "", "chart repository url where to locate the requested chart")
	flags.StringVar(&o.ChartOpts.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
	flags.BoolVar(&o.ChartOpts.Verify, "verify", false, "verify the package before using it")
	flags.StringVar(&o.ChartOpts.Username, "username", "", "chart repository username where to locate the requested chart")
	flags.StringVar(&o.ChartOpts.Password, "password", "", "chart repository password where to locate the requested chart")
	flags.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
	flags.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
	flags.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
	flags.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
}
// AddChartCmd adds the named chart to the store using the repository and
// version settings from o.
func AddChartCmd(ctx context.Context, o *AddChartOpts, s *store.Layout, chartName string) error {
	// TODO: Reduce duplicates between api chart and upstream helm opts
	cfg := v1alpha1.Chart{
		Name:    chartName,
		RepoURL: o.ChartOpts.RepoURL,
		Version: o.ChartOpts.Version,
	}
	return storeChart(ctx, s, cfg, o.ChartOpts)
}
// storeChart downloads the chart described by cfg and records it in the
// store tagged with its chart version.
func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *action.ChartPathOptions) error {
	l := log.FromContext(ctx)
	l.Infof("adding 'chart' [%s] to the store", cfg.Name)

	// TODO: This shouldn't be necessary
	opts.RepoURL = cfg.RepoURL
	opts.Version = cfg.Version

	chrt, err := chart.NewChart(cfg.Name, opts)
	if err != nil {
		return err
	}

	loaded, err := chrt.Load()
	if err != nil {
		return err
	}

	ref, err := reference.NewTagged(loaded.Name(), loaded.Metadata.Version)
	if err != nil {
		return err
	}

	if _, err := s.AddOCI(ctx, chrt, ref.Name()); err != nil {
		return err
	}

	l.Infof("successfully added 'chart' [%s]", ref.Name())
	return nil
}

View File

@@ -0,0 +1,76 @@
package store
import (
"context"
"fmt"
"strings"
"github.com/spf13/cobra"
"oras.land/oras-go/pkg/content"
"github.com/rancherfederal/hauler/pkg/cosign"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// CopyOpts holds the options for the `store copy` subcommand, chiefly the
// credentials and transport settings for remote registry targets.
type CopyOpts struct {
	*RootOpts

	Username  string
	Password  string
	Insecure  bool
	PlainHTTP bool
}

// AddFlags registers the copy-specific flags on cmd.
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.Username, "username", "u", "", "Username when copying to an authenticated remote registry")
	flags.StringVarP(&o.Password, "password", "p", "", "Password when copying to an authenticated remote registry")
	flags.BoolVar(&o.Insecure, "insecure", false, "Toggle allowing insecure connections when copying to a remote registry")
	flags.BoolVar(&o.PlainHTTP, "plain-http", false, "Toggle allowing plain http connections when copying to a remote registry")
}
// CopyCmd copies the entire store to the target named by targetRef.
// Supported schemes are "dir://<path>" (filesystem) and
// "registry://<host>" (remote OCI registry).
//
// Fix: the original split with strings.SplitN and indexed components[1]
// unconditionally inside the "dir"/"registry" cases, which panicked with an
// index-out-of-range when targetRef contained no "://" separator (e.g. a
// bare "registry"). Malformed references now return the protocol error.
func CopyCmd(ctx context.Context, o *CopyOpts, s *store.Layout, targetRef string) error {
	l := log.FromContext(ctx)

	protocol, target, found := strings.Cut(targetRef, "://")
	if !found {
		return fmt.Errorf("detecting protocol from [%s]", targetRef)
	}

	switch protocol {
	case "dir":
		l.Debugf("identified directory target reference")
		fs := content.NewFile(target)
		defer fs.Close()

		if _, err := s.CopyAll(ctx, fs, nil); err != nil {
			return err
		}

	case "registry":
		l.Debugf("identified registry target reference")
		ropts := content.RegistryOptions{
			Username:  o.Username,
			Password:  o.Password,
			Insecure:  o.Insecure,
			PlainHTTP: o.PlainHTTP,
		}

		// Only perform a registry login when credentials were provided.
		if ropts.Username != "" {
			if err := cosign.RegistryLogin(ctx, s, target, ropts); err != nil {
				return err
			}
		}

		if err := cosign.LoadImages(ctx, s, target, ropts); err != nil {
			return err
		}

	default:
		return fmt.Errorf("detecting protocol from [%s]", targetRef)
	}

	l.Infof("copied artifacts to [%s]", target)
	return nil
}

View File

@@ -0,0 +1,78 @@
package store
import (
"context"
"encoding/json"
"fmt"
"strings"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/internal/mapper"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)
// ExtractOpts holds the options for the `store extract` subcommand.
type ExtractOpts struct {
	*RootOpts

	// DestinationDir is where extracted content is written; empty means the
	// current directory.
	DestinationDir string
}

// AddArgs registers the extract-specific flags on cmd.
func (o *ExtractOpts) AddArgs(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.DestinationDir, "output", "o", "", "Directory to save contents to (defaults to current directory)")
}
// ExtractCmd locates ref in the store and writes its content out to the
// configured destination directory. It errors when no stored reference
// matches ref.
func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Layout, ref string) error {
	l := log.FromContext(ctx)

	r, err := reference.Parse(ref)
	if err != nil {
		return err
	}

	found := false
	// refName renamed from the original's "reference", which shadowed the
	// reference package inside the callback.
	walkErr := s.Walk(func(refName string, desc ocispec.Descriptor) error {
		if !strings.Contains(refName, r.Name()) {
			return nil
		}
		found = true

		rc, err := s.Fetch(ctx, desc)
		if err != nil {
			return err
		}
		defer rc.Close()

		var m ocispec.Manifest
		if err := json.NewDecoder(rc).Decode(&m); err != nil {
			return err
		}

		mapperStore, err := mapper.FromManifest(m, o.DestinationDir)
		if err != nil {
			return err
		}

		pushedDesc, err := s.Copy(ctx, refName, mapperStore, "")
		if err != nil {
			return err
		}

		l.Infof("extracted [%s] from store with digest [%s]", pushedDesc.MediaType, pushedDesc.Digest.String())
		return nil
	})
	if walkErr != nil {
		return walkErr
	}

	if !found {
		return fmt.Errorf("reference [%s] not found in store (hint: use `hauler store info` to list store contents)", ref)
	}
	return nil
}

View File

@@ -0,0 +1,54 @@
package store
import (
"context"
"errors"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
const (
// DefaultStoreName is the default directory name for the content store
// when the --store flag is not provided.
DefaultStoreName = "store"
)
// RootOpts holds the flags shared by every `hauler store` subcommand.
type RootOpts struct {
// StoreDir is the on-disk location of the content store.
StoreDir string
// CacheDir is deprecated and currently unused (kept for flag compatibility).
CacheDir string
}
// AddArgs registers the shared store flags as persistent flags so every
// subcommand inherits them.
func (o *RootOpts) AddArgs(cmd *cobra.Command) {
	flags := cmd.PersistentFlags()
	flags.StringVarP(&o.StoreDir, "store", "s", DefaultStoreName, "Location to create store at")
	flags.StringVar(&o.CacheDir, "cache", "", "(deprecated flag and currently not used)")
}
// Store resolves the configured store directory — creating it when missing —
// and opens it as an OCI layout.
//
// Fix: the original used os.Mkdir, which fails when any parent directory of
// the store path does not exist (e.g. --store nested/path/store). MkdirAll
// creates the full path and is a no-op for existing directories.
func (o *RootOpts) Store(ctx context.Context) (*store.Layout, error) {
	l := log.FromContext(ctx)

	abs, err := filepath.Abs(o.StoreDir)
	if err != nil {
		return nil, err
	}
	l.Debugf("using store at %s", abs)

	if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(abs, os.ModePerm); err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}

	return store.NewLayout(abs)
}

View File

@@ -0,0 +1,259 @@
package store
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"github.com/olekukonko/tablewriter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)
// InfoOpts holds the options for the `store info` subcommand.
type InfoOpts struct {
	*RootOpts

	OutputFormat string // "table" (default) or "json"
	TypeFilter   string // content type to show; "all" disables filtering
	SizeUnit     string
}

// AddFlags registers the info-specific flags on cmd.
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.OutputFormat, "output", "o", "table", "Output format (table, json)")
	flags.StringVarP(&o.TypeFilter, "type", "t", "all", "Filter on type (image, chart, file, sigs, atts, sbom)")
	// TODO: Regex/globbing
}
// InfoCmd walks every tagged descriptor in the store, builds a display row
// per manifest (one row per platform for multi-arch indexes), sorts the rows,
// and prints them as a table or JSON depending on o.OutputFormat.
func InfoCmd(ctx context.Context, o *InfoOpts, s *store.Layout) error {
var items []item
if err := s.Walk(func(ref string, desc ocispec.Descriptor) error {
// Only descriptors carrying a ref-name annotation are user-visible entries.
if _, ok := desc.Annotations[ocispec.AnnotationRefName]; !ok {
return nil
}
rc, err := s.Fetch(ctx, desc)
if err != nil {
return err
}
defer rc.Close()
// handle multi-arch images
if desc.MediaType == consts.OCIImageIndexSchema || desc.MediaType == consts.DockerManifestListSchema2 {
var idx ocispec.Index
if err := json.NewDecoder(rc).Decode(&idx); err != nil {
return err
}
// One row per platform-specific manifest in the index.
for _, internalDesc := range idx.Manifests {
rc, err := s.Fetch(ctx, internalDesc)
if err != nil {
return err
}
defer rc.Close()
var internalManifest ocispec.Manifest
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
return err
}
i := newItem(s, desc, internalManifest, fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture), o)
// newItem returns the zero item when filtered out or unparsable; skip those.
var emptyItem item
if i != emptyItem {
items = append(items, i)
}
}
// handle "non" multi-arch images
} else if desc.MediaType == consts.DockerManifestSchema2 || desc.MediaType == consts.OCIManifestSchema1 {
var m ocispec.Manifest
if err := json.NewDecoder(rc).Decode(&m); err != nil {
return err
}
rc, err := s.FetchManifest(ctx, m)
if err != nil {
return err
}
defer rc.Close()
// Unmarshal the OCI image content
var internalManifest ocispec.Image
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
return err
}
// Use the config's os/arch when present; "-" otherwise.
if internalManifest.Architecture != "" {
i := newItem(s, desc, m, fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
var emptyItem item
if i != emptyItem {
items = append(items, i)
}
} else {
i := newItem(s, desc, m, "-", o)
var emptyItem item
if i != emptyItem {
items = append(items, i)
}
}
// handle the rest
} else {
var m ocispec.Manifest
if err := json.NewDecoder(rc).Decode(&m); err != nil {
return err
}
i := newItem(s, desc, m, "-", o)
var emptyItem item
if i != emptyItem {
items = append(items, i)
}
}
return nil
}); err != nil {
return err
}
// sort items by ref and arch
sort.Sort(byReferenceAndArch(items))
var msg string
switch o.OutputFormat {
case "json":
msg = buildJson(items...)
fmt.Println(msg)
default:
buildTable(items...)
}
return nil
}
// buildTable renders items as an ASCII table on stdout, merging repeated
// references and appending a total-size footer row.
func buildTable(items ...item) {
	tw := tablewriter.NewWriter(os.Stdout)
	tw.SetHeader([]string{"Reference", "Type", "Platform", "# Layers", "Size"})
	tw.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
	tw.SetRowLine(false)
	tw.SetAutoMergeCellsByColumnIndex([]int{0})

	var totalSize int64
	for _, it := range items {
		// Zero-typed items carry no data and are not rendered.
		if it.Type == "" {
			continue
		}
		totalSize += it.Size
		tw.Append([]string{
			it.Reference,
			it.Type,
			it.Platform,
			fmt.Sprintf("%d", it.Layers),
			byteCountSI(it.Size),
		})
	}

	tw.SetFooter([]string{"", "", "", "Total", byteCountSI(totalSize)})
	tw.Render()
}
// buildJson marshals the given items to indented JSON. On a marshal error
// it returns the empty string.
func buildJson(item ...item) string {
	data, err := json.MarshalIndent(item, "", " ")
	if err != nil {
		return ""
	}
	return string(data)
}
// item is a single row of `hauler store info` output.
type item struct {
// Reference is the stored artifact's parsed reference name.
Reference string
// Type is the human-readable content type (image, chart, file, sigs, atts, sbom).
Type string
// Platform is "os/arch" for images with platform data, "-" otherwise.
Platform string
// Layers is the number of layers in the manifest.
Layers int
// Size is the sum of the manifest's layer sizes in bytes.
Size int64
}
// byReferenceAndArch sorts items primarily by reference. Within a shared
// reference, image entries come first (ordered by platform), followed by the
// remaining entries ordered by type.
type byReferenceAndArch []item

func (a byReferenceAndArch) Len() int      { return len(a) }
func (a byReferenceAndArch) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func (a byReferenceAndArch) Less(i, j int) bool {
	if a[i].Reference != a[j].Reference {
		return a[i].Reference < a[j].Reference
	}

	iIsImage := a[i].Type == "image"
	jIsImage := a[j].Type == "image"
	switch {
	case iIsImage && jIsImage:
		return a[i].Platform < a[j].Platform
	case iIsImage:
		return true
	case jIsImage:
		return false
	default:
		return a[i].Type < a[j].Type
	}
}
// newItem builds an info row for the manifest m described by desc. It
// returns the zero item when the reference annotation cannot be parsed or
// when the entry is excluded by the configured type filter.
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *InfoOpts) item {
	var total int64
	for _, layer := range m.Layers {
		total += layer.Size
	}

	// Derive a human-readable content type from the config media type;
	// anything unrecognized is treated as an image.
	ctype := "image"
	switch m.Config.MediaType {
	case consts.ChartConfigMediaType:
		ctype = "chart"
	case consts.FileLocalConfigMediaType, consts.FileHttpConfigMediaType:
		ctype = "file"
	}

	// Cosign artifact kinds override the media-type classification.
	switch desc.Annotations["kind"] {
	case "dev.cosignproject.cosign/sigs":
		ctype = "sigs"
	case "dev.cosignproject.cosign/atts":
		ctype = "atts"
	case "dev.cosignproject.cosign/sboms":
		ctype = "sbom"
	}

	ref, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
	if err != nil {
		return item{}
	}
	if o.TypeFilter != "all" && ctype != o.TypeFilter {
		return item{}
	}

	return item{
		Reference: ref.Name(),
		Type:      ctype,
		Platform:  plat,
		Layers:    len(m.Layers),
		Size:      total,
	}
}
// byteCountSI formats a byte count as a human-readable size using SI
// (base-1000) units, e.g. 1500000 -> "1.5 MB".
func byteCountSI(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}

	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}

View File

@@ -0,0 +1,70 @@
package store
import (
"context"
"os"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/content"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// LoadOpts holds the options for the `store load` subcommand.
type LoadOpts struct {
	*RootOpts

	// TempOverride optionally replaces the OS default temp directory used
	// while unarchiving.
	TempOverride string
}

// AddFlags registers the load-specific flags on cmd.
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	// On Unix systems, the default is $TMPDIR if non-empty, else /tmp.
	// On Windows, the default is GetTempPath, returning the first non-empty
	// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
	// On Plan 9, the default is /tmp.
	flags.StringVarP(&o.TempOverride, "tempdir", "t", "", "overrides the default directory for temporary files, as returned by your OS.")
}
// LoadCmd unarchives each referenced haul into the configured store
// directory, stopping at the first failure.
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func LoadCmd(ctx context.Context, o *LoadOpts, archiveRefs ...string) error {
	l := log.FromContext(ctx)

	for _, archiveRef := range archiveRefs {
		l.Infof("loading content from [%s] to [%s]", archiveRef, o.StoreDir)
		if err := unarchiveLayoutTo(ctx, archiveRef, o.StoreDir, o.TempOverride); err != nil {
			return err
		}
	}
	return nil
}
// unarchiveLayoutTo accepts an archived oci layout and extracts the contents
// to an existing oci layout, preserving the index. The archive is staged in
// a temporary directory that is removed before returning.
func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string, tempOverride string) error {
	tmpdir, err := os.MkdirTemp(tempOverride, "hauler")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)

	if err := archiver.Unarchive(archivePath, tmpdir); err != nil {
		return err
	}

	src, err := store.NewLayout(tmpdir)
	if err != nil {
		return err
	}

	dst, err := content.NewOCI(dest)
	if err != nil {
		return err
	}

	_, err = src.CopyAll(ctx, dst, nil)
	return err
}

View File

@@ -0,0 +1,55 @@
package store
import (
"context"
"os"
"path/filepath"
"github.com/mholt/archiver/v3"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/pkg/log"
)
// SaveOpts holds the options for the `store save` subcommand.
type SaveOpts struct {
	*RootOpts

	// FileName is the archive file to create.
	FileName string
}

// AddArgs registers the save-specific flags on cmd.
func (o *SaveOpts) AddArgs(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringVarP(&o.FileName, "filename", "f", "haul.tar.zst", "Name of archive")
}
// SaveCmd archives the whole store directory into a tar.zst file at
// outputFile. It temporarily changes the working directory to the store so
// archive paths are relative; the chdir ordering below is intentional.
// TODO: Just use mholt/archiver for now, even though we don't need most of it
func SaveCmd(ctx context.Context, o *SaveOpts, outputFile string) error {
l := log.FromContext(ctx)
// TODO: Support more formats?
a := archiver.NewTarZstd()
a.OverwriteExisting = true
// Resolve the output path before chdir'ing, so a relative outputFile is
// interpreted against the caller's working directory, not the store.
absOutputfile, err := filepath.Abs(outputFile)
if err != nil {
return err
}
cwd, err := os.Getwd()
if err != nil {
return err
}
// Restore the original working directory on exit (best effort; the
// returned error is deliberately ignored).
defer os.Chdir(cwd)
if err := os.Chdir(o.StoreDir); err != nil {
return err
}
// Archive the store's contents ("." now refers to the store directory).
err = a.Archive([]string{"."}, absOutputfile)
if err != nil {
return err
}
l.Infof("saved store [%s] -> [%s]", o.StoreDir, absOutputfile)
return nil
}

View File

@@ -0,0 +1,153 @@
package store
import (
"context"
"fmt"
"net/http"
"os"
"github.com/distribution/distribution/v3/configuration"
dcontext "github.com/distribution/distribution/v3/context"
_ "github.com/distribution/distribution/v3/registry/storage/driver/base"
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
"github.com/distribution/distribution/v3/version"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/internal/server"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// ServeRegistryOpts holds the options for the `store serve registry`
// subcommand.
type ServeRegistryOpts struct {
	*RootOpts

	Port       int
	RootDir    string
	ConfigFile string
	ReadOnly   bool
}

// AddFlags registers the registry-server flags on cmd.
func (o *ServeRegistryOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.IntVarP(&o.Port, "port", "p", 5000, "Port to listen on.")
	flags.StringVar(&o.RootDir, "directory", "registry", "Directory to use for backend. Defaults to $PWD/registry")
	flags.StringVarP(&o.ConfigFile, "config", "c", "", "Path to a config file, will override all other configs")
	flags.BoolVar(&o.ReadOnly, "readonly", true, "Run the registry as readonly.")
}
// ServeRegistryCmd seeds a temporary registry backend with the store's
// contents, then serves it as a distribution registry on the configured
// port. A user-supplied config file completely overrides the generated
// default configuration.
func ServeRegistryCmd(ctx context.Context, o *ServeRegistryOpts, s *store.Layout) error {
l := log.FromContext(ctx)
ctx = dcontext.WithVersion(ctx, version.Version)
// Start a temporary registry so the store contents can be pushed into the
// backing directory before the real server starts.
tr := server.NewTempRegistry(ctx, o.RootDir)
if err := tr.Start(); err != nil {
return err
}
opts := &CopyOpts{}
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry()); err != nil {
return err
}
// The temp registry is only needed for seeding; shut it down before
// serving on the requested port.
tr.Close()
cfg := o.defaultRegistryConfig()
if o.ConfigFile != "" {
ucfg, err := loadConfig(o.ConfigFile)
if err != nil {
return err
}
cfg = ucfg
}
l.Infof("starting registry on port [%d]", o.Port)
r, err := server.NewRegistry(ctx, cfg)
if err != nil {
return err
}
// Blocks until the server stops.
if err = r.ListenAndServe(); err != nil {
return err
}
return nil
}
// ServeFilesOpts holds the options for the `store serve fileserver`
// subcommand.
type ServeFilesOpts struct {
	*RootOpts

	Port    int
	Timeout int // HTTP read/write timeout in seconds
	RootDir string
}

// AddFlags registers the file-server flags on cmd.
func (o *ServeFilesOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.IntVarP(&o.Port, "port", "p", 8080, "Port to listen on.")
	flags.IntVarP(&o.Timeout, "timeout", "t", 60, "Set the http request timeout duration in seconds for both reads and write.")
	flags.StringVar(&o.RootDir, "directory", "fileserver", "Directory to use for backend. Defaults to $PWD/fileserver")
}
// ServeFilesCmd copies the store contents into a directory backend, then
// serves that directory over HTTP until the server stops.
func ServeFilesCmd(ctx context.Context, o *ServeFilesOpts, s *store.Layout) error {
	l := log.FromContext(ctx)
	ctx = dcontext.WithVersion(ctx, version.Version)

	// Seed the backing directory from the store before serving it.
	if err := CopyCmd(ctx, &CopyOpts{}, s, "dir://"+o.RootDir); err != nil {
		return err
	}

	f, err := server.NewFile(ctx, server.FileConfig{
		Root:    o.RootDir,
		Port:    o.Port,
		Timeout: o.Timeout,
	})
	if err != nil {
		return err
	}

	l.Infof("starting file server on port [%d]", o.Port)
	if err := f.ListenAndServe(); err != nil {
		return err
	}
	return nil
}
// loadConfig reads and parses a distribution registry configuration file.
//
// Fix: the original never closed the opened file, leaking the descriptor on
// every call.
func loadConfig(filename string) (*configuration.Configuration, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return configuration.Parse(f)
}
// defaultRegistryConfig builds the registry configuration used when no
// config file is supplied: a filesystem backend rooted at o.RootDir, an
// in-memory blob-descriptor cache, and optional read-only maintenance mode.
func (o *ServeRegistryOpts) defaultRegistryConfig() *configuration.Configuration {
	storage := configuration.Storage{
		"cache":      configuration.Parameters{"blobdescriptor": "inmemory"},
		"filesystem": configuration.Parameters{"rootdirectory": o.RootDir},
		"maintenance": configuration.Parameters{
			"readonly": map[any]any{"enabled": o.ReadOnly},
		},
	}

	cfg := &configuration.Configuration{
		Version: "0.1",
		Storage: storage,
	}

	// Add validation configuration: allow manifests that reference content
	// by URL.
	cfg.Validation.Manifests.URLs.Allow = []string{".+"}

	cfg.Log.Level = "info"
	cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
	cfg.HTTP.Headers = http.Header{
		"X-Content-Type-Options": []string{"nosniff"},
	}

	return cfg
}

View File

@@ -0,0 +1,295 @@
package store
import (
"bufio"
"context"
"fmt"
"io"
"os"
"strings"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"helm.sh/helm/v3/pkg/action"
"k8s.io/apimachinery/pkg/util/yaml"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
tchart "github.com/rancherfederal/hauler/pkg/collection/chart"
"github.com/rancherfederal/hauler/pkg/collection/imagetxt"
"github.com/rancherfederal/hauler/pkg/collection/k3s"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/content"
"github.com/rancherfederal/hauler/pkg/cosign"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/reference"
"github.com/rancherfederal/hauler/pkg/store"
)
// SyncOpts holds the options for the `store sync` subcommand.
type SyncOpts struct {
	*RootOpts

	ContentFiles    []string // local manifest files to process
	Key             string   // default signature-verification key path
	Products        []string // product=version pairs resolved via the product registry
	Platform        string   // default platform restriction
	Registry        string   // default registry for unqualified image refs
	ProductRegistry string   // overrides the default product registry
}

// AddFlags registers the sync-specific flags on cmd.
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	flags.StringSliceVarP(&o.ContentFiles, "files", "f", []string{}, "Path(s) to local content files (Manifests). i.e. '--files ./rke2-files.yml")
	flags.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for signature verification")
	flags.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Feature for RGS Carbide customers to fetch collections and content from the Carbide Registry. i.e. '--product rancher=v2.8.5,rke2=v1.28.11+rke2r1'")
	flags.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specific platform to save. i.e. linux/amd64. Defaults to all if flag is omitted.")
	flags.StringVarP(&o.Registry, "registry", "r", "", "(Optional) Default pull registry for image refs that are not specifying a registry name.")
	flags.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specific Product Registry to use. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us).")
}
// SyncCmd populates the store from content manifests. Product references
// (--products) are resolved to remote manifests via the product registry and
// processed first; local content files (--files) are processed afterwards.
//
// Fixes: the original indexed parts[1] after strings.Split without checking
// that the product actually contained "=", panicking on malformed input, and
// it never closed the manifest files it opened.
func SyncCmd(ctx context.Context, o *SyncOpts, s *store.Layout) error {
	l := log.FromContext(ctx)

	// if passed products, check for a remote manifest to retrieve and use.
	for _, product := range o.Products {
		l.Infof("processing content file for product: '%s'", product)

		productName, productVersion, ok := strings.Cut(product, "=")
		if !ok {
			return fmt.Errorf("invalid product reference [%s]: expected <product>=<version>", product)
		}
		// OCI tags cannot contain '+', so build metadata is mangled to '-'.
		tag := strings.ReplaceAll(productVersion, "+", "-")

		productRegistry := o.ProductRegistry // cli flag
		// if no cli flag use CarbideRegistry.
		if productRegistry == "" {
			productRegistry = consts.CarbideRegistry
		}

		manifestLoc := fmt.Sprintf("%s/hauler/%s-manifest.yaml:%s", productRegistry, productName, tag)
		l.Infof("retrieving product manifest from: '%s'", manifestLoc)

		img := v1alpha1.Image{Name: manifestLoc}
		if err := storeImage(ctx, s, img, o.Platform); err != nil {
			return err
		}
		if err := ExtractCmd(ctx, &ExtractOpts{RootOpts: o.RootOpts}, s, fmt.Sprintf("hauler/%s-manifest.yaml:%s", productName, tag)); err != nil {
			return err
		}

		filename := fmt.Sprintf("%s-manifest.yaml", productName)
		if err := syncContentFile(ctx, o, s, filename); err != nil {
			return err
		}
	}

	// if passed a local manifest, process it
	for _, filename := range o.ContentFiles {
		l.Debugf("processing content file: '%s'", filename)
		if err := syncContentFile(ctx, o, s, filename); err != nil {
			return err
		}
	}
	return nil
}

// syncContentFile opens filename, processes its documents into the store,
// and closes the file (the original leaked the descriptor).
func syncContentFile(ctx context.Context, o *SyncOpts, s *store.Layout, filename string) error {
	fi, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer fi.Close()

	return processContent(ctx, fi, o, s)
}
// processContent splits the manifest file fi into YAML documents and syncs
// each recognized content/collection kind (Files, Images, Charts, K3s,
// ThickCharts, ImageTxts) into the store. Unknown documents are skipped;
// unrecognized-but-parseable kinds are an error.
//
// For Images, per-image settings beat CLI flags, which beat file
// annotations, for the registry, signing key, and platform overrides.
func processContent(ctx context.Context, fi *os.File, o *SyncOpts, s *store.Layout) error {
l := log.FromContext(ctx)
reader := yaml.NewYAMLReader(bufio.NewReader(fi))
// Read all multi-doc YAML documents up front.
var docs [][]byte
for {
raw, err := reader.Read()
if err == io.EOF {
break
}
if err != nil {
return err
}
docs = append(docs, raw)
}
for _, doc := range docs {
obj, err := content.Load(doc)
if err != nil {
// Not a recognized hauler document; skip rather than fail the sync.
l.Debugf("skipping sync of unknown content")
continue
}
l.Infof("syncing [%s] to store", obj.GroupVersionKind().String())
// TODO: Should type switch instead...
switch obj.GroupVersionKind().Kind {
case v1alpha1.FilesContentKind:
var cfg v1alpha1.Files
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
for _, f := range cfg.Spec.Files {
err := storeFile(ctx, s, f)
if err != nil {
return err
}
}
case v1alpha1.ImagesContentKind:
var cfg v1alpha1.Images
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
a := cfg.GetAnnotations()
for _, i := range cfg.Spec.Images {
// Check if the user provided a registry. If a registry is provided in the annotation, use it for the images that don't have a registry in their ref name.
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
newRef, _ := reference.Parse(i.Name)
newReg := o.Registry // cli flag
// if no cli flag but there was an annotation, use the annotation.
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
newReg = a[consts.ImageAnnotationRegistry]
}
// Only relocate refs that carry no explicit registry.
if newRef.Context().RegistryStr() == "" {
newRef, err = reference.Relocate(i.Name, newReg)
if err != nil {
return err
}
}
i.Name = newRef.Name()
}
// Check if the user provided a key. The flag from the CLI takes precedence over the annotation. The individual image key takes precedence over both.
if a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" {
key := o.Key // cli flag
// if no cli flag but there was an annotation, use the annotation.
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
if err != nil {
return err
}
}
// the individual image key trumps all
if i.Key != "" {
key, err = homedir.Expand(i.Key)
if err != nil {
return err
}
}
l.Debugf("key for image [%s]", key)
// verify signature using the provided key.
err := cosign.VerifySignature(ctx, s, key, i.Name)
if err != nil {
// Failed verification skips this image but does not abort the sync.
l.Errorf("signature verification failed for image [%s]. ** hauler will skip adding this image to the store **:\n%v", i.Name, err)
continue
}
l.Infof("signature verified for image [%s]", i.Name)
}
// Check if the user provided a platform. The flag from the CLI takes precedence over the annotation. The individual image platform takes precedence over both.
platform := o.Platform // cli flag
// if no cli flag but there was an annotation, use the annotation.
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
platform = a[consts.ImageAnnotationPlatform]
}
// the individual image platform trumps all
if i.Platform != "" {
platform = i.Platform
}
err = storeImage(ctx, s, i, platform)
if err != nil {
return err
}
}
// sync with local index
s.CopyAll(ctx, s.OCI, nil)
case v1alpha1.ChartsContentKind:
var cfg v1alpha1.Charts
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
for _, ch := range cfg.Spec.Charts {
// TODO: Provide a way to configure syncs
err := storeChart(ctx, s, ch, &action.ChartPathOptions{})
if err != nil {
return err
}
}
case v1alpha1.K3sCollectionKind:
var cfg v1alpha1.K3s
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
k, err := k3s.NewK3s(cfg.Spec.Version)
if err != nil {
return err
}
if _, err := s.AddOCICollection(ctx, k); err != nil {
return err
}
case v1alpha1.ChartsCollectionKind:
var cfg v1alpha1.ThickCharts
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
for _, cfg := range cfg.Spec.Charts {
tc, err := tchart.NewThickChart(cfg, &action.ChartPathOptions{
RepoURL: cfg.RepoURL,
Version: cfg.Version,
})
if err != nil {
return err
}
if _, err := s.AddOCICollection(ctx, tc); err != nil {
return err
}
}
case v1alpha1.ImageTxtsContentKind:
var cfg v1alpha1.ImageTxts
if err := yaml.Unmarshal(doc, &cfg); err != nil {
return err
}
for _, cfgIt := range cfg.Spec.ImageTxts {
it, err := imagetxt.New(cfgIt.Ref,
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
)
if err != nil {
return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
}
if _, err := s.AddOCICollection(ctx, it); err != nil {
return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
}
}
default:
return fmt.Errorf("unrecognized content/collection type: %s", obj.GroupVersionKind().String())
}
}
return nil
}

40
cmd/hauler/cli/version.go Normal file
View File

@@ -0,0 +1,40 @@
package cli
import (
"fmt"
"github.com/spf13/cobra"
"github.com/rancherfederal/hauler/internal/version"
)
// addVersion wires the `version` subcommand onto parent. The version info is
// printed as a banner by default, or as JSON when --json is set.
func addVersion(parent *cobra.Command) {
	var asJSON bool

	cmd := &cobra.Command{
		Use:     "version",
		Short:   "Print the current version",
		Aliases: []string{"v"},
		RunE: func(cmd *cobra.Command, args []string) error {
			v := version.GetVersionInfo()
			v.Name = cmd.Root().Name()
			v.Description = cmd.Root().Short
			v.FontName = "starwars"
			cmd.SetOut(cmd.OutOrStdout())

			if !asJSON {
				cmd.Println(v.String())
				return nil
			}

			out, err := v.JSONString()
			if err != nil {
				return fmt.Errorf("unable to generate JSON from version info: %w", err)
			}
			cmd.Println(out)
			return nil
		},
	}
	cmd.Flags().BoolVar(&asJSON, "json", false, "toggle output in JSON")
	parent.AddCommand(cmd)
}

View File

@@ -1,15 +1,34 @@
package main
import (
"log"
"context"
"embed"
"os"
"github.com/rancherfederal/hauler/cmd/hauler/app"
"github.com/rancherfederal/hauler/cmd/hauler/cli"
"github.com/rancherfederal/hauler/pkg/cosign"
"github.com/rancherfederal/hauler/pkg/log"
)
func main() {
root := app.NewRootCommand()
//go:embed binaries/*
var binaries embed.FS
if err := root.Execute(); err != nil {
log.Fatalln(err)
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewLogger(os.Stdout)
ctx = logger.WithContext(ctx)
// ensure cosign binary is available
if err := cosign.EnsureBinaryExists(ctx, binaries); err != nil {
logger.Errorf("%v", err)
os.Exit(1)
}
if err := cli.New().ExecuteContext(ctx); err != nil {
logger.Errorf("%v", err)
cancel()
os.Exit(1)
}
}

229
go.mod
View File

@@ -1,68 +1,175 @@
module github.com/rancherfederal/hauler
go 1.16
go 1.21
require (
cloud.google.com/go/storage v1.8.0 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/containerd/containerd v1.5.0-beta.4
github.com/deislabs/oras v0.11.1
github.com/docker/docker v20.10.6+incompatible // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/google/go-containerregistry v0.5.1
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.12
github.com/klauspost/compress v1.13.0 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/mholt/archiver/v3 v3.5.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/otiai10/copy v1.6.0
github.com/pterm/pterm v0.12.24
github.com/rancher/fleet v0.3.5
github.com/rancher/fleet/pkg/apis v0.0.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/afero v1.6.0
github.com/spf13/cobra v1.1.3
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
google.golang.org/genproto v0.0.0-20210524171403-669157292da3 // indirect
google.golang.org/grpc v1.38.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
helm.sh/helm/v3 v3.5.1
k8s.io/apimachinery v0.21.1
k8s.io/cli-runtime v0.20.2
k8s.io/client-go v11.0.1-0.20190816222228-6d55c1b1f1ca+incompatible
sigs.k8s.io/cli-utils v0.23.1
sigs.k8s.io/controller-runtime v0.9.0
sigs.k8s.io/yaml v1.2.0
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be
github.com/containerd/containerd v1.7.11
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
github.com/docker/go-metrics v0.0.1
github.com/google/go-containerregistry v0.16.1
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
github.com/mholt/archiver/v3 v3.5.1
github.com/mitchellh/go-homedir v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc6
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.31.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.10.0
github.com/spf13/cobra v1.8.0
golang.org/x/sync v0.6.0
helm.sh/helm/v3 v3.14.2
k8s.io/apimachinery v0.29.0
k8s.io/client-go v0.29.0
oras.land/oras-go v1.2.5
)
replace (
github.com/rancher/fleet/pkg/apis v0.0.0 => github.com/rancher/fleet/pkg/apis v0.0.0-20210604212701-3a76c78716ab
helm.sh/helm/v3 => github.com/rancher/helm/v3 v3.3.3-fleet1
k8s.io/api => k8s.io/api v0.20.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.2 // indirect
k8s.io/apimachinery => k8s.io/apimachinery v0.20.2 // indirect
k8s.io/apiserver => k8s.io/apiserver v0.20.2
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.2
k8s.io/client-go => github.com/rancher/client-go v0.20.0-fleet1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.2
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.2
k8s.io/code-generator => k8s.io/code-generator v0.20.2
k8s.io/component-base => k8s.io/component-base v0.20.2
k8s.io/component-helpers => k8s.io/component-helpers v0.20.2
k8s.io/controller-manager => k8s.io/controller-manager v0.20.2
k8s.io/cri-api => k8s.io/cri-api v0.20.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.2
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.2
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.2
k8s.io/kubectl => k8s.io/kubectl v0.20.2
k8s.io/kubelet => k8s.io/kubelet v0.20.2
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.2
k8s.io/metrics => k8s.io/metrics v0.20.2
k8s.io/mount-utils => k8s.io/mount-utils v0.20.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.2
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
github.com/andybalholm/brotli v1.0.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v25.0.6+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.2 // indirect
github.com/gomodule/redigo v1.8.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/nwaples/rardecode v1.1.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.2 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rubenv/sql-migrate v1.5.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ulikunitz/xz v0.5.9 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.58.3 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.29.0 // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/apiserver v0.29.0 // indirect
k8s.io/cli-runtime v0.29.0 // indirect
k8s.io/component-base v0.29.0 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/kubectl v0.29.0 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

2084
go.sum

File diff suppressed because it is too large Load Diff

215
install.sh Executable file
View File

@@ -0,0 +1,215 @@
#!/bin/bash
# Usage:
# - curl -sfL... | ENV_VAR=... bash
# - ENV_VAR=... ./install.sh
#
# Install Usage:
# Install Latest Release
# - curl -sfL https://get.hauler.dev | bash
# - ./install.sh
#
# Install Specific Release
# - curl -sfL https://get.hauler.dev | HAULER_VERSION=1.0.0 bash
# - HAULER_VERSION=1.0.0 ./install.sh
#
# Set Install Directory
# - curl -sfL https://get.hauler.dev | HAULER_INSTALL_DIR=/usr/local/bin bash
# - HAULER_INSTALL_DIR=/usr/local/bin ./install.sh
#
# Debug Usage:
# - curl -sfL https://get.hauler.dev | HAULER_DEBUG=true bash
# - HAULER_DEBUG=true ./install.sh
#
# Uninstall Usage:
# - curl -sfL https://get.hauler.dev | HAULER_UNINSTALL=true bash
# - HAULER_UNINSTALL=true ./install.sh
#
# Documentation:
# - https://hauler.dev
# - https://github.com/hauler-dev/hauler

# set functions for logging
# verbose: plain message with no prefix
function verbose {
    echo "$1"
}

# info: blank line, then an informational message
function info {
    echo && echo "[INFO] Hauler: $1"
}

# warn: blank line, then a warning message (does not exit)
function warn {
    echo && echo "[WARN] Hauler: $1"
}

# fatal: blank line, then an error message, then exit non-zero
function fatal {
    echo && echo "[ERROR] Hauler: $1"
    exit 1
}

# debug hauler from argument or environment variable
# (set -x traces every command executed, for troubleshooting)
if [ "${HAULER_DEBUG}" = "true" ]; then
    set -x
fi

# start hauler preflight checks
info "Starting Preflight Checks..."

# check for required packages and dependencies
# NOTE(review): "source" is a bash builtin; `command -v source` resolves it
# under the bash shebang but would fail under plain POSIX sh.
for cmd in echo curl grep sed rm mkdir awk openssl tar install source; do
    if ! command -v "$cmd" &> /dev/null; then
        fatal "$cmd is required to install Hauler"
    fi
done
# set install directory from argument or environment variable
HAULER_INSTALL_DIR=${HAULER_INSTALL_DIR:-/usr/local/bin}

# ensure install directory exists
if [ ! -d "${HAULER_INSTALL_DIR}" ]; then
    mkdir -p "${HAULER_INSTALL_DIR}" || fatal "Failed to Create Install Directory: ${HAULER_INSTALL_DIR}"
fi

# ensure install directory is writable (by user or root privileges)
if [ ! -w "${HAULER_INSTALL_DIR}" ]; then
    if [ "$(id -u)" -ne 0 ]; then
        fatal "Root privileges are required to install Hauler to Directory: ${HAULER_INSTALL_DIR}"
    fi
fi

# uninstall hauler from argument or environment variable
if [ "${HAULER_UNINSTALL}" = "true" ]; then
    # remove the hauler binary
    rm -rf "${HAULER_INSTALL_DIR}/hauler" || fatal "Failed to Remove Hauler from ${HAULER_INSTALL_DIR}"

    # remove the working directory
    rm -rf "$HOME/.hauler" || fatal "Failed to Remove Hauler Directory: $HOME/.hauler"

    info "Successfully Uninstalled Hauler" && echo
    exit 0
fi

# set version environment variable
if [ -z "${HAULER_VERSION}" ]; then
    # attempt to retrieve the latest version from GitHub
    # (sed strips the leading "v" from the release tag)
    HAULER_VERSION=$(curl -s https://api.github.com/repos/hauler-dev/hauler/releases/latest | grep '"tag_name":' | sed 's/.*"v\([^"]*\)".*/\1/')

    # exit if the version could not be detected
    if [ -z "${HAULER_VERSION}" ]; then
        fatal "HAULER_VERSION is unable to be detected and/or retrieved from GitHub"
    fi
fi
# detect the operating system
# (normalize `uname -s` to lowercase before matching)
PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')
case $PLATFORM in
    linux)
        PLATFORM="linux"
        ;;
    darwin)
        PLATFORM="darwin"
        ;;
    *)
        fatal "Unsupported Platform: $PLATFORM"
        ;;
esac

# detect the architecture
# (map the common `uname -m` aliases onto the two release architectures)
ARCH=$(uname -m)
case $ARCH in
    x86_64 | x86-32 | x64 | x32 | amd64)
        ARCH="amd64"
        ;;
    aarch64 | arm64)
        ARCH="arm64"
        ;;
    *)
        fatal "Unsupported Architecture: $ARCH"
        ;;
esac
# start hauler installation
info "Starting Installation..."

# display the version, platform, and architecture
verbose "- Version: v${HAULER_VERSION}"
verbose "- Platform: $PLATFORM"
verbose "- Architecture: $ARCH"
verbose "- Install Directory: ${HAULER_INSTALL_DIR}"

# check working directory and/or create it
if [ ! -d "$HOME/.hauler" ]; then
    mkdir -p "$HOME/.hauler" || fatal "Failed to Create Directory: $HOME/.hauler"
fi

# update permissions of working directory
# NOTE(review): 777 makes the directory world-writable — confirm intentional
chmod -R 777 "$HOME/.hauler" || fatal "Failed to Update Permissions of Directory: $HOME/.hauler"

# change to working directory
cd "$HOME/.hauler" || fatal "Failed to Change Directory: $HOME/.hauler"

# start hauler artifacts download
info "Starting Download..."

# download the checksum file
if ! curl -sfOL "https://github.com/hauler-dev/hauler/releases/download/v${HAULER_VERSION}/hauler_${HAULER_VERSION}_checksums.txt"; then
    fatal "Failed to Download: hauler_${HAULER_VERSION}_checksums.txt"
fi

# download the archive file
if ! curl -sfOL "https://github.com/hauler-dev/hauler/releases/download/v${HAULER_VERSION}/hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"; then
    fatal "Failed to Download: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
fi

# start hauler checksum verification
info "Starting Checksum Verification..."

# verify the Hauler checksum
# awk selects the checksum line whose filename column matches our artifact;
# `openssl dgst` prints "SHA256(file)= <hash>", so the hash is field 2
EXPECTED_CHECKSUM=$(awk -v HAULER_VERSION="${HAULER_VERSION}" -v PLATFORM="${PLATFORM}" -v ARCH="${ARCH}" '$2 == "hauler_"HAULER_VERSION"_"PLATFORM"_"ARCH".tar.gz" {print $1}' "hauler_${HAULER_VERSION}_checksums.txt")
DETERMINED_CHECKSUM=$(openssl dgst -sha256 "hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz" | awk '{print $2}')

if [ -z "${EXPECTED_CHECKSUM}" ]; then
    fatal "Failed to Locate Checksum: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
elif [ "${DETERMINED_CHECKSUM}" = "${EXPECTED_CHECKSUM}" ]; then
    verbose "- Expected Checksum: ${EXPECTED_CHECKSUM}"
    verbose "- Determined Checksum: ${DETERMINED_CHECKSUM}"
    verbose "- Successfully Verified Checksum: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
else
    verbose "- Expected: ${EXPECTED_CHECKSUM}"
    verbose "- Determined: ${DETERMINED_CHECKSUM}"
    fatal "Failed Checksum Verification: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
fi

# uncompress the hauler archive
tar -xzf "hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz" || fatal "Failed to Extract: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"

# install the hauler binary
install -m 755 hauler "${HAULER_INSTALL_DIR}" || fatal "Failed to Install Hauler: ${HAULER_INSTALL_DIR}"

# add hauler to the path
# NOTE(review): when run via `curl | bash`, the exports/sourcing below only
# affect this script's shell, not the user's interactive session (the rc-file
# append still takes effect on the user's next login shell)
if [[ ":$PATH:" != *":${HAULER_INSTALL_DIR}:"* ]]; then
    if [ -f "$HOME/.bashrc" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.bashrc"
        source "$HOME/.bashrc"
    elif [ -f "$HOME/.bash_profile" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.bash_profile"
        source "$HOME/.bash_profile"
    elif [ -f "$HOME/.zshrc" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.zshrc"
        source "$HOME/.zshrc"
    elif [ -f "$HOME/.profile" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.profile"
        source "$HOME/.profile"
    else
        warn "Failed to add ${HAULER_INSTALL_DIR} to PATH: Unsupported Shell"
    fi
fi

# display success message
info "Successfully Installed Hauler at ${HAULER_INSTALL_DIR}/hauler"

# display availability message
info "Hauler v${HAULER_VERSION} is now available for use!"

# display hauler docs message
verbose "- Documentation: https://hauler.dev" && echo

View File

@@ -0,0 +1,86 @@
package mapper
import (
"context"
"io"
"os"
"path/filepath"
"strings"
ccontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"oras.land/oras-go/pkg/content"
)
// NewMapperFileStore creates a file store rooted at root whose Pusher
// consults mapper to choose an on-disk filename per descriptor media type.
//
// This extends content.File, and differs in that it allows much more
// functionality into how each descriptor is written.
func NewMapperFileStore(root string, mapper map[string]Fn) *store {
	return &store{
		File:   content.NewFile(root),
		mapper: mapper,
	}
}
// Pusher splits ref into its tag and digest halves at the first '@' and
// returns a mapper-aware pusher for it. A ref without '@' yields an empty
// digest.
func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	tag, digest, _ := strings.Cut(ref, "@")
	return &pusher{
		store:  s.File,
		tag:    tag,
		ref:    digest,
		mapper: s.mapper,
	}, nil
}
// store is a mapper-aware file store: it embeds the oras file store and adds
// per-media-type filename mapping applied at push time.
type store struct {
	*content.File

	// mapper maps a media type to the function that chooses its on-disk name.
	mapper map[string]Fn
}
// Push returns a content writer for desc.
//
// Descriptors that resolve to a name (via oras annotations) are delegated to
// the underlying file store's pusher. Descriptors whose media type has a
// registered mapper are written to the mapped filename under the store root.
// Anything else is drained to io.Discard: the digest is still verified, but
// no file is written.
func (s *pusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) {
	// TODO: This is suuuuuper ugly... redo this when oras v2 is out
	if _, ok := content.ResolveName(desc); ok {
		p, err := s.store.Pusher(ctx, s.ref)
		if err != nil {
			return nil, err
		}
		return p.Push(ctx, desc)
	}

	// No custom mapper registered for this media type: verify the digest but
	// discard the bytes rather than persisting a file.
	if _, ok := s.mapper[desc.MediaType]; !ok {
		return content.NewIoContentWriter(io.Discard, content.WithOutputHash(desc.Digest)), nil
	}

	filename, err := s.mapper[desc.MediaType](desc)
	if err != nil {
		return nil, err
	}

	// Resolve the mapped filename relative to the store root.
	fullFileName := filepath.Join(s.store.ResolvePath(""), filename)

	// TODO: Don't rewrite everytime, we can check the digest
	f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return nil, errors.Wrap(err, "pushing file")
	}

	// Verify both the incoming and written bytes against the descriptor digest.
	w := content.NewIoContentWriter(f, content.WithInputHash(desc.Digest), content.WithOutputHash(desc.Digest))
	return w, nil
}
// pusher writes descriptors for a single reference into a file store,
// consulting mapper for on-disk filenames.
type pusher struct {
	store *content.File
	// tag is the part of the reference before the first '@'.
	tag string
	// ref is the digest part of the reference after '@' (empty when absent).
	ref string
	mapper map[string]Fn
}

View File

@@ -0,0 +1,83 @@
package mapper
import (
"fmt"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"oras.land/oras-go/pkg/target"
"github.com/rancherfederal/hauler/pkg/consts"
)
type Fn func(desc ocispec.Descriptor) (string, error)
// FromManifest will return the appropriate content store given a reference and source type adequate for storing the results on disk
func FromManifest(manifest ocispec.Manifest, root string) (target.Target, error) {
	// TODO: Don't rely solely on config mediatype
	// NOTE(review): every branch defers s.Close() before returning s, so the
	// store is closed as this function returns — before the caller uses it.
	// Confirm this is intentional (it may rely on Close being harmless when
	// no files have been opened yet).
	switch manifest.Config.MediaType {
	case consts.DockerConfigJSON, consts.OCIManifestSchema1:
		// Image content: manifests/config/layers get image filenames.
		s := NewMapperFileStore(root, Images())
		defer s.Close()
		return s, nil

	case consts.ChartLayerMediaType, consts.ChartConfigMediaType:
		// Helm chart content: chart archive and provenance filenames.
		s := NewMapperFileStore(root, Chart())
		defer s.Close()
		return s, nil

	default:
		// Unknown content: no mapper, unrecognized media types are discarded.
		s := NewMapperFileStore(root, nil)
		defer s.Close()
		return s, nil
	}
}
// Images returns the media-type → filename mapper used when writing image
// content to disk: manifests become "manifest.json", the image config
// becomes "config.json", and each layer is written as "<digest>.tar.gz".
func Images() map[string]Fn {
	writeManifest := Fn(func(ocispec.Descriptor) (string, error) {
		return "manifest.json", nil
	})
	writeConfig := Fn(func(ocispec.Descriptor) (string, error) {
		return "config.json", nil
	})
	writeLayer := Fn(func(desc ocispec.Descriptor) (string, error) {
		return fmt.Sprintf("%s.tar.gz", desc.Digest.String()), nil
	})

	return map[string]Fn{
		consts.DockerManifestSchema2:     writeManifest,
		consts.DockerManifestListSchema2: writeManifest,
		consts.OCIManifestSchema1:        writeManifest,
		consts.OCILayer:                  writeLayer,
		consts.DockerLayer:               writeLayer,
		consts.DockerConfigJSON:          writeConfig,
	}
}
// Chart returns the media-type → filename mapper used when writing Helm
// chart content to disk: the chart archive keeps its annotated title
// (falling back to "chart.tar.gz") and provenance data becomes "prov.json".
func Chart() map[string]Fn {
	return map[string]Fn{
		consts.ChartLayerMediaType: func(desc ocispec.Descriptor) (string, error) {
			if name, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
				return name, nil
			}
			return "chart.tar.gz", nil
		},
		consts.ProvLayerMediaType: func(desc ocispec.Descriptor) (string, error) {
			return "prov.json", nil
		},
	}
}

46
internal/server/file.go Normal file
View File

@@ -0,0 +1,46 @@
package server
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// FileConfig configures the fileserver returned by NewFile.
type FileConfig struct {
	// Root is the directory to serve; NewFile defaults it to "." when empty.
	Root string
	// Host is currently not read by NewFile — NOTE(review): confirm intent.
	Host string
	// Port is the TCP port to listen on; NewFile defaults it to 8080.
	Port int
	// Timeout is the read/write timeout in seconds; NewFile defaults it to 60.
	Timeout int
}
// NewFile returns a fileserver serving the contents of cfg.Root over HTTP,
// logging every request to stdout.
//
// Zero-value fields are defaulted: Root to ".", Port to 8080, and Timeout to
// 60 seconds. cfg.Host is currently unused (only the port appears in Addr).
// TODO: Better configs
func NewFile(ctx context.Context, cfg FileConfig) (Server, error) {
	// Apply defaults BEFORE the handler is constructed. Previously the file
	// handler captured cfg.Root prior to defaulting, so the Root default was
	// dead code (it only worked because http.Dir("") is treated as ".").
	if cfg.Root == "" {
		cfg.Root = "."
	}
	if cfg.Port == 0 {
		cfg.Port = 8080
	}
	if cfg.Timeout == 0 {
		cfg.Timeout = 60
	}

	r := mux.NewRouter()
	r.PathPrefix("/").Handler(handlers.LoggingHandler(os.Stdout, http.StripPrefix("/", http.FileServer(http.Dir(cfg.Root)))))

	srv := &http.Server{
		Handler:      r,
		Addr:         fmt.Sprintf(":%d", cfg.Port),
		WriteTimeout: time.Duration(cfg.Timeout) * time.Second,
		ReadTimeout:  time.Duration(cfg.Timeout) * time.Second,
	}
	return srv, nil
}

122
internal/server/registry.go Normal file
View File

@@ -0,0 +1,122 @@
package server
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"time"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/registry"
"github.com/distribution/distribution/v3/registry/handlers"
"github.com/docker/go-metrics"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// NewRegistry constructs a distribution registry server from cfg.
//
// When Prometheus debug metrics are enabled in cfg, the metrics handler is
// registered on the process-global http.DefaultServeMux at the configured
// path (defaulting to "/metrics") — note this mutates global state.
func NewRegistry(ctx context.Context, cfg *configuration.Configuration) (*registry.Registry, error) {
	r, err := registry.NewRegistry(ctx, cfg)
	if err != nil {
		return nil, err
	}

	if cfg.HTTP.Debug.Prometheus.Enabled {
		path := cfg.HTTP.Debug.Prometheus.Path
		if path == "" {
			path = "/metrics"
		}
		http.Handle(path, metrics.Handler())
	}

	return r, nil
}
// tmpRegistryServer is a throwaway in-process registry wrapped around an
// httptest server; construct with NewTempRegistry, then Start/Stop it.
type tmpRegistryServer struct {
	*httptest.Server
}
// NewTempRegistry returns an unstarted, in-process distribution registry
// backed by the filesystem at root with an in-memory blob-descriptor cache.
// Call Start to begin serving and Stop to shut it down.
func NewTempRegistry(ctx context.Context, root string) *tmpRegistryServer {
	cfg := &configuration.Configuration{
		Version: "0.1",
		Storage: configuration.Storage{
			"cache":      configuration.Parameters{"blobdescriptor": "inmemory"},
			"filesystem": configuration.Parameters{"rootdirectory": root},
		},
	}

	// Add validation configuration
	// (allow manifests that reference content by arbitrary URLs)
	cfg.Validation.Manifests.URLs.Allow = []string{".+"}

	cfg.Log.Level = "error"
	cfg.HTTP.Headers = http.Header{
		"X-Content-Type-Options": []string{"nosniff"},
	}

	// Silence logrus; "panic" always parses, so the ErrorLevel fallback is
	// defensive only. NOTE(review): SetLevel mutates the process-wide logger.
	l, err := logrus.ParseLevel("panic")
	if err != nil {
		l = logrus.ErrorLevel
	}
	logrus.SetLevel(l)

	app := handlers.NewApp(ctx, cfg)
	app.RegisterHealthChecks()

	// Answer "/" directly as a liveness endpoint; everything else is handled
	// by the registry app.
	handler := alive("/", app)
	s := httptest.NewUnstartedServer(handler)

	return &tmpRegistryServer{
		Server: s,
	}
}
// Registry returns the URL of the server without the protocol, suitable for content references
func (t *tmpRegistryServer) Registry() string {
	return strings.TrimPrefix(t.Server.URL, "http://")
}
// Start begins serving the temporary registry and blocks until its API
// version endpoint responds with 200 OK, retrying up to 5 times with
// exponential backoff. It returns the last probe error if the registry
// never becomes ready.
func (t *tmpRegistryServer) Start() error {
	t.Server.Start()

	err := retry(5, 1*time.Second, func() (err error) {
		// Probe the spec'd API version endpoint "/v2/" directly; "/v2"
		// only worked via a redirect.
		resp, err := http.Get(t.Server.URL + "/v2/")
		if err != nil {
			return err
		}
		defer resp.Body.Close()

		if resp.StatusCode == http.StatusOK {
			return nil
		}
		// The message previously read "to start temporary registry", which
		// rendered as "last error: to start temporary registry" once wrapped.
		return errors.New("temporary registry not ready")
	})
	return err
}
// Stop shuts down the temporary registry, blocking until outstanding
// requests on the underlying httptest server have completed.
func (t *tmpRegistryServer) Stop() {
	t.Server.Close()
}
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
func retry(attempts int, sleep time.Duration, f func() error) (err error) {
for i := 0; i < attempts; i++ {
if i > 0 {
time.Sleep(sleep)
sleep *= 2
}
err = f()
if err == nil {
return nil
}
}
return fmt.Errorf("after %d attempts, last error: %s", attempts, err)
}

View File

@@ -0,0 +1,5 @@
package server
// Server is the minimal contract shared by hauler's servers (e.g. the
// fileserver): anything that can block while serving requests.
type Server interface {
	ListenAndServe() error
}

229
internal/version/version.go Normal file
View File

@@ -0,0 +1,229 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"encoding/json"
"fmt"
"os"
"runtime"
"runtime/debug"
"strings"
"sync"
"text/tabwriter"
"time"
"github.com/common-nighthawk/go-figure"
)
// unknown is the fallback for any version field not supplied at build time.
const unknown = "unknown"

// Base version information.
//
// This is the fallback data used when version information from git is not
// provided via go ldflags.
var (
	// Output of "git describe". The prerequisite is that the
	// branch should be tagged using the correct versioning strategy.
	gitVersion = "devel"
	// SHA1 from git, output of $(git rev-parse HEAD)
	gitCommit = unknown
	// State of git tree, either "clean" or "dirty"
	gitTreeState = unknown
	// Build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
	buildDate = unknown
	// flag to print the ascii name banner ("true" enables the banner)
	asciiName = "true"
	// goVersion is the used golang version.
	goVersion = unknown
	// compiler is the used golang compiler.
	compiler = unknown
	// platform is the used os/arch identifier.
	platform = unknown

	// once guards the lazy, one-time population of info in GetVersionInfo.
	once sync.Once
	info = Info{}
)

// Info holds the build metadata reported by the version subcommand. The
// JSON-excluded fields (ASCIIName, FontName, Name, Description) only affect
// the human-readable rendering in String.
type Info struct {
	GitVersion   string `json:"gitVersion"`
	GitCommit    string `json:"gitCommit"`
	GitTreeState string `json:"gitTreeState"`
	BuildDate    string `json:"buildDate"`
	GoVersion    string `json:"goVersion"`
	Compiler     string `json:"compiler"`
	Platform     string `json:"platform"`
	ASCIIName    string `json:"-"`
	FontName     string `json:"-"`
	Name         string `json:"-"`
	Description  string `json:"-"`
}
// getBuildInfo reads the module build information embedded in the binary,
// returning nil when the binary was built without it.
func getBuildInfo() *debug.BuildInfo {
	if bi, ok := debug.ReadBuildInfo(); ok {
		return bi
	}
	return nil
}
// getGitVersion derives the version from the module build info, falling back
// to the package-level gitVersion when the build info is absent or reports a
// development build.
func getGitVersion(bi *debug.BuildInfo) string {
	if bi == nil {
		return unknown
	}

	// TODO: remove this when the issue https://github.com/golang/go/issues/29228 is fixed
	switch bi.Main.Version {
	case "(devel)", "":
		return gitVersion
	}
	return bi.Main.Version
}
// getCommit returns the VCS revision recorded in the build info settings.
func getCommit(bi *debug.BuildInfo) string {
	return getKey(bi, "vcs.revision")
}
// getDirty reports the working-tree state recorded in the build info:
// "dirty" or "clean" when known, the unknown fallback otherwise.
func getDirty(bi *debug.BuildInfo) string {
	switch getKey(bi, "vcs.modified") {
	case "true":
		return "dirty"
	case "false":
		return "clean"
	default:
		return unknown
	}
}
// getBuildDate reads the VCS commit time from the build info and reformats
// it without the trailing "Z"; it returns the unknown fallback when the
// timestamp is absent or unparsable.
func getBuildDate(bi *debug.BuildInfo) string {
	parsed, err := time.Parse("2006-01-02T15:04:05Z", getKey(bi, "vcs.time"))
	if err != nil {
		return unknown
	}
	return parsed.Format("2006-01-02T15:04:05")
}
// getKey scans the build info settings for key and returns its value, or the
// unknown fallback when the build info is nil or the key is absent.
func getKey(bi *debug.BuildInfo, key string) string {
	if bi == nil {
		return unknown
	}
	for _, setting := range bi.Settings {
		if setting.Key == key {
			return setting.Value
		}
	}
	return unknown
}
// GetVersionInfo represents known information on how this binary was built.
// It is computed once: ldflags-provided values win, and any field still at
// its "unknown" fallback is filled in from debug.ReadBuildInfo / runtime.
// Note that it mutates the package-level version variables under sync.Once.
func GetVersionInfo() Info {
	once.Do(func() {
		buildInfo := getBuildInfo()
		// gitVersion is always recomputed; the remaining fields only when
		// ldflags did not already set them.
		gitVersion = getGitVersion(buildInfo)
		if gitCommit == unknown {
			gitCommit = getCommit(buildInfo)
		}
		if gitTreeState == unknown {
			gitTreeState = getDirty(buildInfo)
		}
		if buildDate == unknown {
			buildDate = getBuildDate(buildInfo)
		}
		if goVersion == unknown {
			goVersion = runtime.Version()
		}
		if compiler == unknown {
			compiler = runtime.Compiler
		}
		if platform == unknown {
			platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
		}

		info = Info{
			ASCIIName:    asciiName,
			GitVersion:   gitVersion,
			GitCommit:    gitCommit,
			GitTreeState: gitTreeState,
			BuildDate:    buildDate,
			GoVersion:    goVersion,
			Compiler:     compiler,
			Platform:     platform,
		}
	})
	return info
}
// String returns the string representation of the version info: an aligned,
// human-readable table, optionally preceded by an ASCII-art banner of i.Name
// rendered in i.FontName (banner enabled when i.ASCIIName == "true").
func (i *Info) String() string {
	b := strings.Builder{}
	w := tabwriter.NewWriter(&b, 0, 0, 2, ' ', 0)

	// name and description are optional.
	if i.Name != "" {
		if i.ASCIIName == "true" {
			f := figure.NewFigure(strings.ToUpper(i.Name), i.FontName, true)
			_, _ = fmt.Fprint(w, f.String())
		}
		_, _ = fmt.Fprint(w, i.Name)
		if i.Description != "" {
			_, _ = fmt.Fprintf(w, ": %s", i.Description)
		}
		_, _ = fmt.Fprint(w, "\n\n")
	}

	_, _ = fmt.Fprintf(w, "GitVersion:\t%s\n", i.GitVersion)
	_, _ = fmt.Fprintf(w, "GitCommit:\t%s\n", i.GitCommit)
	_, _ = fmt.Fprintf(w, "GitTreeState:\t%s\n", i.GitTreeState)
	_, _ = fmt.Fprintf(w, "BuildDate:\t%s\n", i.BuildDate)
	_, _ = fmt.Fprintf(w, "GoVersion:\t%s\n", i.GoVersion)
	_, _ = fmt.Fprintf(w, "Compiler:\t%s\n", i.Compiler)
	_, _ = fmt.Fprintf(w, "Platform:\t%s\n", i.Platform)

	_ = w.Flush()
	return b.String()
}
// JSONString returns the JSON representation of the version info
func (i *Info) JSONString() (string, error) {
	out, marshalErr := json.MarshalIndent(i, "", " ")
	if marshalErr != nil {
		return "", marshalErr
	}
	return string(out), nil
}
// CheckFontName reports whether fontName matches one of the bundled
// go-figure font assets. The match is by substring against asset names; on
// no match it prints a warning to stderr and returns false.
func (i *Info) CheckFontName(fontName string) bool {
	assetNames := figure.AssetNames()

	for _, font := range assetNames {
		if strings.Contains(font, fontName) {
			return true
		}
	}

	fmt.Fprintln(os.Stderr, "font not valid, using default")
	return false
}

View File

@@ -0,0 +1,47 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// ChartsContentKind is the API kind for a plain Helm chart content manifest.
	ChartsContentKind = "Charts"
	// ChartsCollectionKind is the API kind for a "thick" chart collection
	// that also carries extra images.
	ChartsCollectionKind = "ThickCharts"
)

// Charts is the content API object listing Helm charts to collect.
type Charts struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ChartSpec `json:"spec,omitempty"`
}

// ChartSpec holds the set of charts to fetch.
type ChartSpec struct {
	Charts []Chart `json:"charts,omitempty"`
}

// Chart identifies a single Helm chart by name, repository URL, and version.
type Chart struct {
	Name    string `json:"name,omitempty"`
	RepoURL string `json:"repoURL,omitempty"`
	Version string `json:"version,omitempty"`
}

// ThickCharts is the collection API object for charts bundled with images.
type ThickCharts struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ThickChartSpec `json:"spec,omitempty"`
}

// ThickChartSpec holds the set of thick charts to fetch.
type ThickChartSpec struct {
	Charts []ThickChart `json:"charts,omitempty"`
}

// ThickChart is a chart plus any extra image references to collect with it.
type ThickChart struct {
	Chart       `json:",inline,omitempty"`
	ExtraImages []ChartImage `json:"extraImages,omitempty"`
}

// ChartImage is a single extra image reference.
type ChartImage struct {
	Reference string `json:"ref"`
}

View File

@@ -1,91 +1,21 @@
package v1alpha1
import (
"sigs.k8s.io/cli-utils/pkg/object"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Drive interface {
Images() ([]string, error)
BinURL() string
const (
DriverContentKind = "Driver"
)
LibPath() string
EtcPath() string
Config() (*map[string]interface{}, error)
SystemObjects() (objs []object.ObjMetadata)
type Driver struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DriverSpec `json:"spec"`
}
//Driver
type Driver struct {
type DriverSpec struct {
Type string `json:"type"`
Version string `json:"version"`
}
////TODO: Don't hardcode this
//func (k k3s) BinURL() string {
// return "https://github.com/k3s-io/k3s/releases/download/v1.21.1%2Bk3s1/k3s"
//}
//
//func (k k3s) PackageImages() ([]string, error) {
// //TODO: Replace this with a query to images.txt on release page
// return []string{
// "docker.io/rancher/coredns-coredns:1.8.3",
// "docker.io/rancher/klipper-helm:v0.5.0-build20210505",
// "docker.io/rancher/klipper-lb:v0.2.0",
// "docker.io/rancher/library-busybox:1.32.1",
// "docker.io/rancher/library-traefik:2.4.8",
// "docker.io/rancher/local-path-provisioner:v0.0.19",
// "docker.io/rancher/metrics-server:v0.3.6",
// "docker.io/rancher/pause:3.1",
// }, nil
//}
//
//func (k k3s) Config() (*map[string]interface{}, error) {
// // TODO: This should be typed
// c := make(map[string]interface{})
// c["write-kubeconfig-mode"] = "0644"
//
// //TODO: Add uid or something to ensure this works for multi-node setups
// c["node-name"] = "hauler"
//
// return &c, nil
//}
//
//func (k k3s) SystemObjects() (objs []object.ObjMetadata) {
// //TODO: Make sure this matches up with specified config disables
// for _, dep := range []string{"coredns", "local-path-provisioner", "metrics-server"} {
// objMeta, _ := object.CreateObjMetadata("kube-system", dep, schema.GroupKind{Kind: "Deployment", Group: "apps"})
// objs = append(objs, objMeta)
// }
// return objs
//}
//
//func (k k3s) LibPath() string { return "/var/lib/rancher/k3s" }
//func (k k3s) EtcPath() string { return "/etc/rancher/k3s" }
//
////TODO: Implement rke2 as a driver
//type rke2 struct{}
//
//func (r rke2) PackageImages() ([]string, error) { return []string{}, nil }
//func (r rke2) BinURL() string { return "" }
//func (r rke2) LibPath() string { return "" }
//func (r rke2) EtcPath() string { return "" }
//func (r rke2) Config() (*map[string]interface{}, error) { return nil, nil }
//func (r rke2) SystemObjects() (objs []object.ObjMetadata) { return objs }
//
////NewDriver will return the appropriate driver given a kind, defaults to k3s
//func NewDriver(kind string) Drive {
// var d Drive
// switch kind {
// case "rke2":
// //TODO
// d = rke2{}
//
// default:
// d = k3s{
// dataDir: "/var/lib/rancher/k3s",
// etcDir: "/etc/rancher/k3s",
// }
// }
//
// return d
//}

View File

@@ -0,0 +1,27 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FilesContentKind is the TypeMeta kind for a Files manifest.
const FilesContentKind = "Files"

// Files is the top-level API object describing generic files to collect.
type Files struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec FileSpec `json:"spec,omitempty"`
}

// FileSpec holds the list of files referenced by a Files object.
type FileSpec struct {
	Files []File `json:"files,omitempty"`
}

// File describes a single file to collect.
type File struct {
	// Path is the path to the file contents, can be a local or remote path
	Path string `json:"path"`

	// Name is an optional field specifying the name of the file when specified,
	// it will override any dynamic name discovery from Path
	Name string `json:"name,omitempty"`
}

View File

@@ -1,32 +0,0 @@
package v1alpha1
import (
"fmt"
"strings"
)
// Fleet is used as the deployment engine for all things Hauler.
type Fleet struct {
	// Version of fleet to package and use in deployment, e.g. "v0.3.5".
	Version string `json:"version"`
}

// Images returns the container images required to run this fleet version.
// TODO: These should be identified from the chart version.
func (f Fleet) Images() ([]string, error) {
	return []string{
		// Fixed: was a no-arg fmt.Sprintf (vet S1039); a plain literal suffices.
		"rancher/gitjob:v0.1.15",
		fmt.Sprintf("rancher/fleet:%s", f.Version),
		fmt.Sprintf("rancher/fleet-agent:%s", f.Version),
	}, nil
}

// CRDChart returns the download URL for the fleet-crd chart matching Version.
func (f Fleet) CRDChart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/%s/fleet-crd-%s.tgz", f.Version, f.VLess())
}

// Chart returns the download URL for the fleet chart matching Version.
func (f Fleet) Chart() string {
	return fmt.Sprintf("https://github.com/rancher/fleet/releases/download/%s/fleet-%s.tgz", f.Version, f.VLess())
}

// VLess returns Version with its leading "v" stripped (release archives are
// named without it). Fixed: strings.ReplaceAll removed EVERY "v" in the
// version string, not just the prefix; TrimPrefix only strips the leading one.
func (f Fleet) VLess() string {
	return strings.TrimPrefix(f.Version, "v")
}

View File

@@ -0,0 +1,18 @@
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// API group constants for hauler's content and collection object kinds.
const (
	// Version is the API version shared by both groups.
	Version = "v1alpha1"

	// ContentGroup is the API group for single-content objects.
	ContentGroup = "content.hauler.cattle.io"
	// CollectionGroup is the API group for collection objects.
	CollectionGroup = "collection.hauler.cattle.io"
)

// GroupVersions used when registering hauler types with a scheme.
var (
	ContentGroupVersion = schema.GroupVersion{Group: ContentGroup, Version: Version}

	// SchemeBuilder = &scheme.Builder{GroupVersion: ContentGroupVersion}

	CollectionGroupVersion = schema.GroupVersion{Group: CollectionGroup, Version: Version}
)

View File

@@ -0,0 +1,31 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImagesContentKind is the TypeMeta kind for an Images manifest.
const ImagesContentKind = "Images"

// Images is the top-level API object describing container images to collect.
type Images struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ImageSpec `json:"spec,omitempty"`
}

// ImageSpec holds the list of images referenced by an Images object.
type ImageSpec struct {
	Images []Image `json:"images,omitempty"`
}

// Image describes a single image to collect.
type Image struct {
	// Name is the full location for the image, can be referenced by tags or digests
	Name string `json:"name"`

	// Key is the path to the cosign public key used for verifying image signatures
	//Key string `json:"key,omitempty"`
	Key string `json:"key"`

	// Platform of the image to be pulled. If not specified, all platforms will be pulled.
	//Platform string `json:"key,omitempty"`
	Platform string `json:"platform"`
}

View File

@@ -0,0 +1,30 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// ImageTxtsContentKind is the TypeMeta kind for an ImageTxts manifest.
	ImageTxtsContentKind = "ImageTxts"
)

// ImageTxts is the top-level API object for images listed in plain-text
// image list files (e.g. an images.txt published with a release).
type ImageTxts struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ImageTxtsSpec `json:"spec,omitempty"`
}

// ImageTxtsSpec holds the list of image-txt sources.
type ImageTxtsSpec struct {
	ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
}

// ImageTxt references one image list file plus include/exclude filters.
type ImageTxt struct {
	Ref     string          `json:"ref,omitempty"`
	Sources ImageTxtSources `json:"sources,omitempty"`
}

// ImageTxtSources filters which entries of the referenced list are collected.
type ImageTxtSources struct {
	Include []string `json:"include,omitempty"`
	Exclude []string `json:"exclude,omitempty"`
}

View File

@@ -0,0 +1,19 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// K3sCollectionKind is the TypeMeta kind for a K3s collection manifest.
const K3sCollectionKind = "K3s"

// K3s is the top-level API object describing a k3s distribution to collect.
type K3s struct {
	*metav1.TypeMeta  `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec K3sSpec `json:"spec,omitempty"`
}

// K3sSpec selects the k3s release (version and architecture) to collect.
type K3sSpec struct {
	Version string `json:"version"`
	Arch    string `json:"arch"`
}

View File

@@ -1,53 +0,0 @@
package v1alpha1
import (
"os"
"path/filepath"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// Well-known paths inside a hauler package directory.
const (
	BundlesDir = "bundles"
	LayoutDir  = "layout"
	BinDir     = "bin"
	ChartDir   = "charts"

	// PackageFile is the package manifest filename within a package dir.
	PackageFile = "package.json"
)

// Package is the on-disk manifest describing a hauler package.
type Package struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec PackageSpec `json:"spec"`
}

// PackageSpec declares everything bundled into the package.
type PackageSpec struct {
	Fleet  Fleet  `json:"fleet"`
	Driver Driver `json:"driver"`

	// Paths is the list of directories relative to the working directory contains all resources to be bundled.
	// path globbing is supported, for example [ "charts/*" ] will match all folders as a subdirectory of charts/
	// If empty, "/" is the default
	Paths []string `json:"paths,omitempty"`

	Images []string `json:"images,omitempty"`
}
// LoadPackageFromDir will load an existing package from a directory on
// disk; it fails if no PackageFile is found in path.
func LoadPackageFromDir(path string) (Package, error) {
	var p Package
	data, err := os.ReadFile(filepath.Join(path, PackageFile))
	if err != nil {
		return Package{}, err
	}
	if err = yaml.Unmarshal(data, &p); err != nil {
		return Package{}, err
	}
	return p, nil
}

92
pkg/artifacts/config.go Normal file
View File

@@ -0,0 +1,92 @@
package artifacts
import (
"bytes"
"encoding/json"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/consts"
)
// interface guard: marshallableConfig must remain partial.Describable.
var _ partial.Describable = (*marshallableConfig)(nil)

// Config is the minimal surface an OCI config blob must expose: its raw
// bytes plus the describable trio (digest, media type, size).
type Config interface {
	// Raw returns the config bytes
	Raw() ([]byte, error)

	Digest() (v1.Hash, error)
	MediaType() (types.MediaType, error)
	Size() (int64, error)
}

// Marshallable is any value that can be JSON-marshalled into a config.
type Marshallable interface{}

// ConfigOption mutates a marshallableConfig during ToConfig.
type ConfigOption func(*marshallableConfig)
// ToConfig takes anything that is marshallable and wraps it as a Config,
// applying any supplied options (e.g. a custom media type).
func ToConfig(i Marshallable, opts ...ConfigOption) Config {
	cfg := &marshallableConfig{Marshallable: i}
	for _, apply := range opts {
		apply(cfg)
	}
	return cfg
}
// WithConfigMediaType overrides the media type reported by the config.
func WithConfigMediaType(mediaType string) ConfigOption {
	return func(mc *marshallableConfig) {
		mc.mediaType = mediaType
	}
}
// marshallableConfig adapts any JSON-marshallable value to the Config
// interface by serializing it on demand.
type marshallableConfig struct {
	Marshallable

	mediaType string
}

// MediaType returns the configured media type, defaulting to the unknown
// manifest type when unset.
func (c *marshallableConfig) MediaType() (types.MediaType, error) {
	if c.mediaType == "" {
		return types.MediaType(consts.UnknownManifest), nil
	}
	return types.MediaType(c.mediaType), nil
}

// Raw serializes the wrapped value to JSON.
func (c *marshallableConfig) Raw() ([]byte, error) {
	return json.Marshal(c.Marshallable)
}

// Digest returns the SHA256 digest of the serialized config.
func (c *marshallableConfig) Digest() (v1.Hash, error) {
	return Digest(c)
}

// Size returns the size in bytes of the serialized config.
func (c *marshallableConfig) Size() (int64, error) {
	return Size(c)
}
// WithRawConfig is satisfied by any config that can serialize itself.
type WithRawConfig interface {
	Raw() ([]byte, error)
}

// Digest computes the SHA256 digest of c's serialized form.
func Digest(c WithRawConfig) (v1.Hash, error) {
	raw, err := c.Raw()
	if err != nil {
		return v1.Hash{}, err
	}
	h, _, err := v1.SHA256(bytes.NewReader(raw))
	return h, err
}

// Size reports the length in bytes of c's serialized form, or -1 on error.
func Size(c WithRawConfig) (int64, error) {
	raw, err := c.Raw()
	if err != nil {
		return -1, err
	}
	return int64(len(raw)), nil
}

116
pkg/artifacts/file/file.go Normal file
View File

@@ -0,0 +1,116 @@
package file
import (
"context"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
"github.com/rancherfederal/hauler/pkg/consts"
)
// interface guard
var _ artifacts.OCI = (*File)(nil)

// File implements the OCI interface for File API objects. API spec information is
// stored into the Path field.
type File struct {
	// Path is the local path or remote URL of the file contents.
	Path string

	// computed guards the lazily-built fields below (set by compute()).
	computed    bool
	client      *getter.Client
	config      artifacts.Config
	blob        gv1.Layer
	manifest    *gv1.Manifest
	annotations map[string]string
}
// NewFile builds a File artifact for path, using a default getter client
// unless one is supplied via options.
func NewFile(path string, opts ...Option) *File {
	f := &File{
		Path:   path,
		client: getter.NewClient(getter.ClientOptions{}),
	}
	for _, apply := range opts {
		apply(f)
	}
	return f
}
// Name is the name of the file's reference
func (f *File) Name(path string) string {
	return f.client.Name(path)
}

// MediaType returns the manifest media type used for file artifacts.
func (f *File) MediaType() string {
	return consts.OCIManifestSchema1
}

// RawConfig computes the artifact (if needed) and returns its raw config bytes.
func (f *File) RawConfig() ([]byte, error) {
	if err := f.compute(); err != nil {
		return nil, err
	}
	return f.config.Raw()
}
// Layers computes the artifact (if needed) and returns its single blob layer.
func (f *File) Layers() ([]gv1.Layer, error) {
	if err := f.compute(); err != nil {
		return nil, err
	}
	return []gv1.Layer{f.blob}, nil
}
// Manifest computes the artifact (if needed) and returns its OCI manifest.
func (f *File) Manifest() (*gv1.Manifest, error) {
	if err := f.compute(); err != nil {
		return nil, err
	}
	return f.manifest, nil
}
// compute lazily resolves the file into its blob layer, config, and
// manifest, memoizing the result so repeated accessors are cheap.
func (f *File) compute() error {
	if f.computed {
		return nil
	}
	ctx := context.TODO()

	blob, err := f.client.LayerFrom(ctx, f.Path)
	if err != nil {
		return err
	}

	layer, err := partial.Descriptor(blob)
	if err != nil {
		return err
	}

	// Prefer a config supplied via WithConfig; fall back to the getter-derived
	// one. BUG fix: the previous code called f.client.Config(f.Path) in both
	// branches, silently discarding any config set through options.
	cfg := f.config
	if cfg == nil {
		cfg = f.client.Config(f.Path)
	}

	cfgDesc, err := partial.Descriptor(cfg)
	if err != nil {
		return err
	}

	m := &gv1.Manifest{
		SchemaVersion: 2,
		MediaType:     gtypes.MediaType(f.MediaType()),
		Config:        *cfgDesc,
		Layers:        []gv1.Descriptor{*layer},
		Annotations:   f.annotations,
	}

	f.manifest = m
	f.config = cfg
	f.blob = blob
	f.computed = true
	return nil
}

View File

@@ -0,0 +1,166 @@
package file_test
import (
"bytes"
"context"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"testing"
"github.com/spf13/afero"
"github.com/rancherfederal/hauler/pkg/artifacts/file"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
"github.com/rancherfederal/hauler/pkg/consts"
)
// Shared fixtures: one file (filename/data) backed by an in-memory fs (tfs),
// served over an HTTP test server (ts), accessed via a mock client (mc).
var (
	filename = "myfile.yaml"
	data     = []byte(`data`)

	ts  *httptest.Server
	tfs afero.Fs
	mc  *getter.Client
)
// TestMain wires package fixtures up before the suite and tears them down
// after. BUG fix: teardown was previously deferred, but os.Exit skips
// deferred calls, so the test server was never shut down — run it explicitly.
func TestMain(m *testing.M) {
	teardown := setup()
	code := m.Run()
	teardown()
	os.Exit(code)
}
// Test_file_Config checks that local and remote refs produce the expected
// config media types.
func Test_file_Config(t *testing.T) {
	tests := []struct {
		name    string
		ref     string
		want    string
		wantErr bool
	}{
		{
			name:    "should properly type local file",
			ref:     filename,
			want:    consts.FileLocalConfigMediaType,
			wantErr: false,
		},
		{
			name:    "should properly type remote file",
			ref:     ts.URL + "/" + filename,
			want:    consts.FileHttpConfigMediaType,
			wantErr: false,
		},
		// TODO: Add directory test
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := file.NewFile(tt.ref, file.WithClient(mc))
			// Fixed: removed a stray f.MediaType() call whose result was
			// discarded, and the "unxpected" typo in the failure message.
			m, err := f.Manifest()
			if err != nil {
				t.Fatal(err)
			}
			got := string(m.Config.MediaType)
			if got != tt.want {
				t.Errorf("unexpected mediatype; got %s, want %s", got, tt.want)
			}
		})
	}
}
// Test_file_Layers checks that file contents survive the round trip into a
// compressed layer for both local and remote refs.
func Test_file_Layers(t *testing.T) {
	tests := []struct {
		name    string
		ref     string
		want    []byte
		wantErr bool
	}{
		{
			name:    "should load a local file and preserve contents",
			ref:     filename,
			want:    data,
			wantErr: false,
		},
		{
			name:    "should load a remote file and preserve contents",
			ref:     ts.URL + "/" + filename,
			want:    data,
			wantErr: false,
		},
		// TODO: Add directory test
	}
	for _, tt := range tests {
		t.Run(tt.name, func(it *testing.T) {
			f := file.NewFile(tt.ref, file.WithClient(mc))
			layers, err := f.Layers()
			if (err != nil) != tt.wantErr {
				it.Fatalf("unexpected Layers() error: got %v, want %v", err, tt.wantErr)
			}
			// BUG fix: an expected error previously fell through and indexed
			// layers[0] on a nil slice, panicking instead of passing.
			if err != nil {
				return
			}
			rc, err := layers[0].Compressed()
			if err != nil {
				it.Fatal(err)
			}
			got, err := io.ReadAll(rc)
			if err != nil {
				it.Fatal(err)
			}
			if !bytes.Equal(got, tt.want) {
				it.Fatalf("unexpected Layers(): got %v, want %v", layers, tt.want)
			}
		})
	}
}
// setup wires an in-memory filesystem, an HTTP test server backed by it,
// and a getter client reading from those fakes. It returns a teardown
// func that shuts the server down.
func setup() func() {
	tfs = afero.NewMemMapFs()
	afero.WriteFile(tfs, filename, data, 0644)

	mf := &mockFile{File: getter.NewFile(), fs: tfs}
	mockHttp := getter.NewHttp()

	mhttp := afero.NewHttpFs(tfs)
	fileserver := http.FileServer(mhttp.Dir("."))
	// Serve only through the dedicated test server. Fixed: the previous
	// http.Handle("/", fileserver) registered on the global DefaultServeMux,
	// an unused side effect that panics if setup ever runs twice.
	ts = httptest.NewServer(fileserver)

	mc = &getter.Client{
		Options: getter.ClientOptions{},
		Getters: map[string]getter.Getter{
			"file": mf,
			"http": mockHttp,
		},
	}

	return func() {
		ts.Close()
	}
}
// mockFile is a getter.File that reads from an afero filesystem instead of
// the OS, so tests stay hermetic.
type mockFile struct {
	*getter.File

	fs afero.Fs
}

// Open opens the referenced path from the backing afero fs.
func (m mockFile) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
	return m.fs.Open(filepath.Join(u.Host, u.Path))
}

// Detect reports whether u names an existing non-directory in the fake fs.
func (m mockFile) Detect(u *url.URL) bool {
	info, err := m.fs.Stat(filepath.Join(u.Host, u.Path))
	return err == nil && !info.IsDir()
}

View File

@@ -0,0 +1,165 @@
package getter
import (
"archive/tar"
"compress/gzip"
"context"
"io"
"net/url"
"os"
"path/filepath"
"time"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
)
// directory is a Getter that archives a local directory into a gzip'd
// tarball; it reuses File's path/name handling via embedding.
type directory struct {
	*File
}

// NewDirectory returns a new directory getter.
func NewDirectory() *directory {
	return &directory{File: NewFile()}
}
// Open archives the directory at u into a gzip'd tarball staged in a
// temporary file and returns a reader over it. The returned ReadCloser
// deletes the temporary file when closed, so callers must Close it.
//
// Fixed: the write handle was never closed and the temp file never removed,
// leaking an fd and a file per call.
func (d directory) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
	tmpfile, err := os.CreateTemp("", "hauler")
	if err != nil {
		return nil, err
	}
	cleanup := func() {
		tmpfile.Close()
		os.Remove(tmpfile.Name())
	}

	digester := digest.Canonical.Digester()
	zw := gzip.NewWriter(io.MultiWriter(tmpfile, digester.Hash()))

	tarDigester := digest.Canonical.Digester()
	if err := tarDir(d.path(u), d.Name(u), io.MultiWriter(zw, tarDigester.Hash()), false); err != nil {
		zw.Close()
		cleanup()
		return nil, err
	}
	if err := zw.Close(); err != nil {
		cleanup()
		return nil, err
	}
	if err := tmpfile.Sync(); err != nil {
		cleanup()
		return nil, err
	}
	// Close the write handle before reopening for read.
	if err := tmpfile.Close(); err != nil {
		os.Remove(tmpfile.Name())
		return nil, err
	}

	fi, err := os.Open(tmpfile.Name())
	if err != nil {
		os.Remove(tmpfile.Name())
		return nil, err
	}
	// Hand cleanup responsibility to the caller via the closer wrapper.
	return &closer{
		t:      fi,
		closes: []func() error{fi.Close, func() error { return os.Remove(tmpfile.Name()) }},
	}, nil
}
// Detect reports whether u resolves to an existing directory.
func (d directory) Detect(u *url.URL) bool {
	p := d.path(u)
	if p == "" {
		return false
	}
	info, err := os.Stat(p)
	return err == nil && info.IsDir()
}
// Config returns a directory-flavored artifact config recording u as the
// original reference.
func (d directory) Config(u *url.URL) artifacts.Config {
	c := &directoryConfig{
		config{Reference: u.String()},
	}
	return artifacts.ToConfig(c, artifacts.WithConfigMediaType(consts.FileDirectoryConfigMediaType))
}

// directoryConfig tags the shared config payload with the directory media type.
type directoryConfig struct {
	config `json:",inline,omitempty"`
}
// tarDir walks root and writes its contents as a tar stream to w. Each
// entry is renamed under prefix, ownership is normalized to root (uid/gid 0,
// empty user/group names), and, when stripTimes is set, all timestamps are
// zeroed for reproducible output. Symlink targets are preserved.
func tarDir(root string, prefix string, w io.Writer, stripTimes bool) error {
	tw := tar.NewWriter(w)
	defer tw.Close()

	if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rename path: relative to root, nested under prefix, with forward
		// slashes as the tar format requires.
		name, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		name = filepath.Join(prefix, name)
		name = filepath.ToSlash(name)

		// Generate header; symlinks need their target resolved first.
		var link string
		mode := info.Mode()
		if mode&os.ModeSymlink != 0 {
			if link, err = os.Readlink(path); err != nil {
				return err
			}
		}
		header, err := tar.FileInfoHeader(info, link)
		if err != nil {
			return errors.Wrap(err, path)
		}
		header.Name = name

		// Normalize ownership so archives don't leak local uids/usernames.
		header.Uid = 0
		header.Gid = 0
		header.Uname = ""
		header.Gname = ""

		if stripTimes {
			header.ModTime = time.Time{}
			header.AccessTime = time.Time{}
			header.ChangeTime = time.Time{}
		}

		// Write file header, then contents for regular files only
		// (directories and symlinks are header-only entries).
		if err := tw.WriteHeader(header); err != nil {
			return errors.Wrap(err, "tar")
		}
		if mode.IsRegular() {
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			if _, err := io.Copy(tw, file); err != nil {
				return errors.Wrap(err, path)
			}
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
type closer struct {
t io.Reader
closes []func() error
}
func (c *closer) Read(p []byte) (n int, err error) {
return c.t.Read(p)
}
func (c *closer) Close() error {
var err error
for _, c := range c.closes {
lastErr := c()
if err == nil {
err = lastErr
}
}
return err
}

View File

@@ -0,0 +1,53 @@
package getter
import (
"context"
"io"
"net/url"
"os"
"path/filepath"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
)
type File struct{}
func NewFile() *File {
return &File{}
}
func (f File) Name(u *url.URL) string {
return filepath.Base(f.path(u))
}
func (f File) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
return os.Open(f.path(u))
}
func (f File) Detect(u *url.URL) bool {
if len(f.path(u)) == 0 {
return false
}
fi, err := os.Stat(f.path(u))
if err != nil {
return false
}
return !fi.IsDir()
}
func (f File) path(u *url.URL) string {
return filepath.Join(u.Host, u.Path)
}
// Config returns a local-file-flavored artifact config recording u as the
// original reference.
func (f File) Config(u *url.URL) artifacts.Config {
	c := &fileConfig{
		config{Reference: u.String()},
	}
	return artifacts.ToConfig(c, artifacts.WithConfigMediaType(consts.FileLocalConfigMediaType))
}

// fileConfig tags the shared config payload with the local-file media type.
type fileConfig struct {
	config `json:",inline,omitempty"`
}

View File

@@ -0,0 +1,148 @@
package getter
import (
"context"
"fmt"
"io"
"net/url"
v1 "github.com/google/go-containerregistry/pkg/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"oras.land/oras-go/pkg/content"
content2 "github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/layer"
)
// Client dispatches artifact sources to the registered Getters based on
// which getter detects the parsed source URL.
type Client struct {
	// Getters maps a type key ("file", "directory", "http") to its getter.
	Getters map[string]Getter

	Options ClientOptions
}

// ClientOptions provides options for the client
type ClientOptions struct {
	// NameOverride, when set, is returned by Name() for every source.
	NameOverride string
}

// ErrGetterTypeUnknown is returned when no registered getter detects a source.
var (
	ErrGetterTypeUnknown = errors.New("no getter type found matching reference")
)

// Getter abstracts fetching artifact contents from a parsed source URL.
type Getter interface {
	Open(context.Context, *url.URL) (io.ReadCloser, error)
	Detect(*url.URL) bool
	Name(*url.URL) string
	Config(*url.URL) content2.Config
}
// NewClient builds a Client with the default getters (file, directory,
// http) registered.
func NewClient(opts ClientOptions) *Client {
	return &Client{
		Getters: map[string]Getter{
			"file":      NewFile(),
			"directory": NewDirectory(),
			"http":      NewHttp(),
		},
		Options: opts,
	}
}
// LayerFrom resolves source to a getter and wraps its contents in a
// v1.Layer annotated with the artifact's title (plus an unpack hint for
// directory sources).
func (c *Client) LayerFrom(ctx context.Context, source string) (v1.Layer, error) {
	u, err := url.Parse(source)
	if err != nil {
		return nil, err
	}

	g, err := c.getterFrom(u)
	if err != nil {
		if errors.Is(err, ErrGetterTypeUnknown) {
			return nil, err
		}
		return nil, fmt.Errorf("create getter: %w", err)
	}

	opener := func() (io.ReadCloser, error) { return g.Open(ctx, u) }

	annotations := map[string]string{
		ocispec.AnnotationTitle: c.Name(source),
	}
	if _, isDir := g.(*directory); isDir {
		annotations[content.AnnotationUnpack] = "true"
	}

	l, err := layer.FromOpener(opener,
		layer.WithMediaType(consts.FileLayerMediaType),
		layer.WithAnnotations(annotations))
	if err != nil {
		return nil, err
	}
	return l, nil
}
// ContentFrom resolves source to a getter and returns a reader over its
// raw contents; the caller is responsible for closing it.
func (c *Client) ContentFrom(ctx context.Context, source string) (io.ReadCloser, error) {
	u, err := url.Parse(source)
	if err != nil {
		return nil, fmt.Errorf("parse source %s: %w", source, err)
	}

	g, err := c.getterFrom(u)
	switch {
	case err == nil:
		return g.Open(ctx, u)
	case errors.Is(err, ErrGetterTypeUnknown):
		return nil, err
	default:
		return nil, fmt.Errorf("create getter: %w", err)
	}
}
// getterFrom returns the first registered getter that recognizes srcUrl,
// or ErrGetterTypeUnknown when none do.
func (c *Client) getterFrom(srcUrl *url.URL) (Getter, error) {
	for _, candidate := range c.Getters {
		if candidate.Detect(srcUrl) {
			return candidate, nil
		}
	}
	return nil, errors.Wrapf(ErrGetterTypeUnknown, "source %s", srcUrl.String())
}
// Name returns the artifact's name: the NameOverride when configured,
// otherwise the name reported by the first getter that recognizes source.
// Falls back to the raw source string.
func (c *Client) Name(source string) string {
	if override := c.Options.NameOverride; override != "" {
		return override
	}
	u, err := url.Parse(source)
	if err != nil {
		return source
	}
	for _, g := range c.Getters {
		if g.Detect(u) {
			return g.Name(u)
		}
	}
	return source
}
// Config returns the artifact config from the first getter that recognizes
// source, or nil when the source is unparseable or unrecognized.
func (c *Client) Config(source string) content2.Config {
	u, err := url.Parse(source)
	if err != nil {
		return nil
	}
	for _, candidate := range c.Getters {
		if candidate.Detect(u) {
			return candidate.Config(u)
		}
	}
	return nil
}
// config is the common artifact-config payload shared by all getters.
type config struct {
	// Reference is the original source reference the artifact came from.
	Reference string `json:"reference"`

	Annotations map[string]string `json:"annotations,omitempty"`
}

View File

@@ -0,0 +1,139 @@
package getter_test
import (
"net/url"
"os"
"path/filepath"
"testing"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
)
// TestClient_Detect checks that each source shape routes to the expected
// getter type key.
func TestClient_Detect(t *testing.T) {
	teardown := setup(t)
	defer teardown()

	c := getter.NewClient(getter.ClientOptions{})

	type args struct {
		source string
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "should identify a file",
			args: args{
				source: fileWithExt,
			},
			want: "file",
		},
		{
			name: "should identify a directory",
			args: args{
				source: rootDir,
			},
			want: "directory",
		},
		{
			name: "should identify an http fqdn",
			args: args{
				source: "http://my.cool.website",
			},
			want: "http",
		},
		{
			// Fixed: this case duplicated the previous subtest's name even
			// though it exercises the https scheme.
			name: "should identify an https fqdn",
			args: args{
				source: "https://my.cool.website",
			},
			want: "http",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := identify(c, tt.args.source); got != tt.want {
				t.Errorf("identify() = %v, want %v", got, tt.want)
			}
		})
	}
}
// identify returns the registered key of the first getter that detects
// source, or "" when none match.
func identify(c *getter.Client, source string) string {
	u, _ := url.Parse(source)
	for key, g := range c.Getters {
		if g.Detect(u) {
			return key
		}
	}
	return ""
}
// TestClient_Name checks name derivation for files and directories, and
// that NameOverride wins over detection.
func TestClient_Name(t *testing.T) {
	teardown := setup(t)
	defer teardown()

	type args struct {
		source string
		opts   getter.ClientOptions
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "should correctly name a file with an extension",
			args: args{source: fileWithExt, opts: getter.ClientOptions{}},
			want: "file.yaml",
		},
		{
			name: "should correctly name a directory",
			args: args{source: rootDir, opts: getter.ClientOptions{}},
			want: rootDir,
		},
		{
			name: "should correctly override a files name",
			args: args{source: fileWithExt, opts: getter.ClientOptions{NameOverride: "myfile"}},
			want: "myfile",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			c := getter.NewClient(tc.args.opts)
			if got := c.Name(tc.args.source); got != tc.want {
				t.Errorf("Name() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test fixtures: a scratch directory containing one yaml file.
var (
	rootDir     = "gettertests"
	fileWithExt = filepath.Join(rootDir, "file.yaml")
)
// setup creates the on-disk fixtures (a directory holding one empty yaml
// file) and returns a cleanup func that removes them.
func setup(t *testing.T) func() {
	if err := os.MkdirAll(rootDir, os.ModePerm); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(fileWithExt, []byte(""), 0644); err != nil {
		t.Fatal(err)
	}
	return func() { os.RemoveAll(rootDir) }
}

View File

@@ -0,0 +1,72 @@
package getter
import (
"context"
"io"
"mime"
"net/http"
"net/url"
"path/filepath"
"strings"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
)
type Http struct{}
func NewHttp() *Http {
return &Http{}
}
func (h Http) Name(u *url.URL) string {
resp, err := http.Head(u.String())
if err != nil {
return ""
}
name, _ := url.PathUnescape(u.String())
if err != nil {
return ""
}
contentType := resp.Header.Get("Content-Type")
for _, v := range strings.Split(contentType, ",") {
t, _, err := mime.ParseMediaType(v)
if err != nil {
break
}
// TODO: Identify known mimetypes for hints at a filename
_ = t
}
// TODO: Not this
return filepath.Base(name)
}
func (h Http) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
resp, err := http.Get(u.String())
if err != nil {
return nil, err
}
return resp.Body, nil
}
func (h Http) Detect(u *url.URL) bool {
switch u.Scheme {
case "http", "https":
return true
}
return false
}
// Config returns an http-flavored artifact config recording u as the
// original reference.
func (h *Http) Config(u *url.URL) artifacts.Config {
	c := &httpConfig{
		config{Reference: u.String()},
	}
	return artifacts.ToConfig(c, artifacts.WithConfigMediaType(consts.FileHttpConfigMediaType))
}

// httpConfig tags the shared config payload with the http media type.
type httpConfig struct {
	config `json:",inline,omitempty"`
}

View File

@@ -0,0 +1,26 @@
package file
import (
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
)
// Option mutates a File during construction.
type Option func(*File)

// WithClient overrides the getter client used to fetch the file.
func WithClient(c *getter.Client) Option {
	return func(file *File) {
		file.client = c
	}
}

// WithConfig sets a custom OCI config (and media type) for the file.
func WithConfig(obj interface{}, mediaType string) Option {
	return func(file *File) {
		file.config = artifacts.ToConfig(obj, artifacts.WithConfigMediaType(mediaType))
	}
}

// WithAnnotations sets the manifest annotations attached to the file.
func WithAnnotations(m map[string]string) Option {
	return func(file *File) {
		file.annotations = m
	}
}

View File

@@ -0,0 +1,80 @@
package image
import (
"fmt"
"github.com/google/go-containerregistry/pkg/authn"
gname "github.com/google/go-containerregistry/pkg/name"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancherfederal/hauler/pkg/artifacts"
)
// interface guard: Image must satisfy the artifacts OCI contract.
var _ artifacts.OCI = (*Image)(nil)

// MediaType reports the image's manifest media type, or "" when it cannot
// be determined.
func (i *Image) MediaType() string {
	mediaType, err := i.Image.MediaType()
	if err != nil {
		return ""
	}
	return string(mediaType)
}

// RawConfig returns the raw bytes of the image's config file.
func (i *Image) RawConfig() ([]byte, error) {
	return i.RawConfigFile()
}

// Image implements the OCI interface for Image API objects. API spec information
// is stored into the Name field.
type Image struct {
	Name string
	gv1.Image
}
// NewImage resolves name into a remote image, authenticating with the
// default keychain in addition to any caller-supplied remote options.
func NewImage(name string, opts ...remote.Option) (*Image, error) {
	ref, err := gname.ParseReference(name)
	if err != nil {
		return nil, err
	}

	// Default options are appended after caller options, as before.
	opts = append(opts, remote.WithAuthFromKeychain(authn.DefaultKeychain))

	img, err := remote.Image(ref, opts...)
	if err != nil {
		return nil, err
	}
	return &Image{Name: name, Image: img}, nil
}
// IsMultiArchImage reports whether name refers to an image index (a
// multi-arch image) rather than a single-platform image. It returns an
// error only when the reference cannot be parsed or fetched.
func IsMultiArchImage(name string, opts ...remote.Option) (bool, error) {
	ref, err := gname.ParseReference(name)
	if err != nil {
		// %w (previously %v) lets callers unwrap the underlying error.
		return false, fmt.Errorf("parsing reference %q: %w", name, err)
	}
	defaultOpts := []remote.Option{
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
	}
	opts = append(opts, defaultOpts...)
	desc, err := remote.Get(ref, opts...)
	if err != nil {
		return false, fmt.Errorf("getting image %q: %w", name, err)
	}
	if _, err := desc.ImageIndex(); err != nil {
		// If the descriptor could not be converted to an image index, it's not a multi-arch image
		return false, nil
	}
	// If the descriptor could be converted to an image index, it's a multi-arch image
	return true, nil
}

View File

@@ -0,0 +1 @@
package image_test

View File

@@ -0,0 +1,78 @@
package memory
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/static"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/consts"
)
// interface guard: Memory must satisfy the artifacts OCI contract.
var _ artifacts.OCI = (*Memory)(nil)

// Memory implements the OCI interface for a generic set of bytes stored in memory.
type Memory struct {
	// blob is the single static layer wrapping the in-memory bytes.
	blob        v1.Layer
	annotations map[string]string
	config      artifacts.Config
}

// defaultConfig is the minimal config attached when none is supplied.
type defaultConfig struct {
	MediaType string `json:"mediaType,omitempty"`
}
// NewMemory wraps data as a static layer with media type mt, attaching a
// default config unless one is supplied via options.
func NewMemory(data []byte, mt string, opts ...Option) *Memory {
	m := &Memory{
		blob:   static.NewLayer(data, types.MediaType(mt)),
		config: artifacts.ToConfig(defaultConfig{MediaType: consts.MemoryConfigMediaType}),
	}
	for _, apply := range opts {
		apply(m)
	}
	return m
}
// MediaType returns the OCI manifest schema type used for memory artifacts.
func (m *Memory) MediaType() string {
	return consts.OCIManifestSchema1
}
// Manifest assembles a single-layer OCI manifest for the in-memory blob.
func (m *Memory) Manifest() (*v1.Manifest, error) {
	blobDesc, err := partial.Descriptor(m.blob)
	if err != nil {
		return nil, err
	}
	cfgDesc, err := partial.Descriptor(m.config)
	if err != nil {
		return nil, err
	}
	return &v1.Manifest{
		SchemaVersion: 2,
		MediaType:     types.MediaType(m.MediaType()),
		Config:        *cfgDesc,
		Layers:        []v1.Descriptor{*blobDesc},
		Annotations:   m.annotations,
	}, nil
}
// RawConfig serializes the config, or returns an empty JSON object when no
// config is set.
func (m *Memory) RawConfig() ([]byte, error) {
	if m.config != nil {
		return m.config.Raw()
	}
	return []byte(`{}`), nil
}
// Layers returns the in-memory blob as the artifact's only layer.
func (m *Memory) Layers() ([]v1.Layer, error) {
	return []v1.Layer{m.blob}, nil
}

View File

@@ -0,0 +1,61 @@
package memory_test
import (
"math/rand"
"testing"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/opencontainers/go-digest"
"github.com/rancherfederal/hauler/pkg/artifacts/memory"
)
// TestMemory_Layers verifies the in-memory blob round-trips through
// Layers() with its digest intact.
func TestMemory_Layers(t *testing.T) {
	tests := []struct {
		name    string
		want    *v1.Manifest
		wantErr bool
	}{
		{
			name:    "should preserve content",
			want:    nil,
			wantErr: false,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			data, m := setup(t)

			layers, err := m.Layers()
			if err != nil {
				t.Fatal(err)
			}
			if len(layers) != 1 {
				t.Fatalf("Expected 1 layer, got %d", len(layers))
			}

			h, err := layers[0].Digest()
			if err != nil {
				t.Fatal(err)
			}
			if want := digest.FromBytes(data); want.String() != h.String() {
				t.Fatalf("bytes do not match, got %s, expected %s", h.String(), want.String())
			}
		})
	}
}
// setup returns 2KiB of random bytes and a Memory artifact wrapping them.
func setup(t *testing.T) ([]byte, *memory.Memory) {
	block := make([]byte, 2048)
	if _, err := rand.Read(block); err != nil {
		t.Fatal(err)
	}
	return block, memory.NewMemory(block, "random")
}

View File

@@ -0,0 +1,17 @@
package memory
import "github.com/rancherfederal/hauler/pkg/artifacts"
// Option mutates a Memory during construction.
type Option func(*Memory)

// WithConfig sets a custom OCI config (and media type) for the artifact.
func WithConfig(obj interface{}, mediaType string) Option {
	return func(mem *Memory) {
		mem.config = artifacts.ToConfig(obj, artifacts.WithConfigMediaType(mediaType))
	}
}

// WithAnnotations sets the manifest annotations attached to the artifact.
func WithAnnotations(annotations map[string]string) Option {
	return func(mem *Memory) {
		mem.annotations = annotations
	}
}

22
pkg/artifacts/ocis.go Normal file
View File

@@ -0,0 +1,22 @@
package artifacts
import "github.com/google/go-containerregistry/pkg/v1"
// OCI is the bare minimum we need to represent an artifact in an oci layout
//
// At a high level, it is not constrained by an Image's config, manifests, and layer ordinality
// This specific implementation fully encapsulates v1.Layer's within a more generic form
// OCI is the bare minimum we need to represent an artifact in an oci layout
//
// At a high level, it is not constrained by an Image's config, manifests, and layer ordinality
// This specific implementation fully encapsulates v1.Layer's within a more generic form
type OCI interface {
	MediaType() string
	Manifest() (*v1.Manifest, error)
	RawConfig() ([]byte, error)
	Layers() ([]v1.Layer, error)
}

// OCICollection groups multiple OCI artifacts under string keys.
type OCICollection interface {
	// Contents returns the list of contents in the collection
	Contents() (map[string]OCI, error)
}

View File

@@ -1,180 +0,0 @@
package bootstrap
import (
"bytes"
"context"
"fmt"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/otiai10/copy"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/fs"
"github.com/rancherfederal/hauler/pkg/log"
"helm.sh/helm/v3/pkg/chart/loader"
"io"
"os"
"path/filepath"
)
// Booter describes the lifecycle of bootstrapping a hauler package onto a driver.
//
// NOTE(review): the booter type below implements PreBoot with an extra
// driver.Driver argument and does not implement Init — confirm whether this
// interface is still accurate for the concrete implementation.
type Booter interface {
Init() error
PreBoot(context.Context) error
Boot(context.Context, driver.Driver) error
PostBoot(context.Context, driver.Driver) error
}
// booter boots a loaded hauler package from a package filesystem.
type booter struct {
Package v1alpha1.Package
// fs is rooted at the package directory the booter was created from.
fs fs.PkgFs
logger log.Logger
}
// NewBooter will build a new booter given a path to a directory containing a
// hauler package; it returns an error if the package cannot be loaded from
// that directory.
func NewBooter(pkgPath string, logger log.Logger) (*booter, error) {
pkg, err := v1alpha1.LoadPackageFromDir(pkgPath)
if err != nil {
return nil, err
}
fsys := fs.NewPkgFS(pkgPath)
return &booter{
Package: pkg,
fs: fsys,
logger: logger,
}, nil
}
// PreBoot stages the package contents (binaries, images, bundles, charts)
// into the driver's data directories and writes the driver config, so the
// driver can pick them up when it starts.
func (b booter) PreBoot(ctx context.Context, d driver.Driver) error {
b.logger.Infof("Beginning pre boot")
//TODO: Feel like there's a better way to do all this dir creation
if err := os.MkdirAll(d.DataPath(), os.ModePerm); err != nil {
return err
}
if err := b.moveBin(); err != nil {
return err
}
if err := b.moveImages(d); err != nil {
return err
}
if err := b.moveBundles(d); err != nil {
return err
}
if err := b.moveCharts(d); err != nil {
return err
}
b.logger.Debugf("Writing %s config", d.Name())
if err := d.WriteConfig(); err != nil {
return err
}
b.logger.Successf("Completed pre boot")
return nil
}
// Boot starts the driver and blocks until its core components report ready.
//
// Driver output is mirrored to os.Stdout while also being captured in local
// buffers.
func (b booter) Boot(ctx context.Context, d driver.Driver) error {
	b.logger.Infof("Beginning boot")
	var stdoutBuf, stderrBuf bytes.Buffer
	out := io.MultiWriter(os.Stdout, &stdoutBuf, &stderrBuf)
	if err := d.Start(out); err != nil {
		return err
	}
	b.logger.Infof("Waiting for driver core components to provision...")
	// BUG FIX: this previously returned the (nil) start error instead of
	// waitErr, silently swallowing provisioning failures and timeouts.
	if waitErr := waitForDriver(ctx, d); waitErr != nil {
		return waitErr
	}
	b.logger.Successf("Completed boot")
	return nil
}
// PostBoot installs the fleet CRD chart followed by the fleet chart into the
// fleet-system namespace of the freshly booted cluster.
func (b booter) PostBoot(ctx context.Context, d driver.Driver) error {
	b.logger.Infof("Beginning post boot")
	cf := NewBootConfig("fleet-system", d.KubeConfigPath())

	// install loads <chartName>-<fleet version>.tgz from the package chart
	// directory and installs it as releaseName, logging msg beforehand.
	// Factored out to remove the duplicated crd/chart install sequence.
	install := func(chartName, releaseName, msg string) error {
		chartPath := b.fs.Chart().Path(fmt.Sprintf("%s-%s.tgz", chartName, b.Package.Spec.Fleet.VLess()))
		ch, err := loader.Load(chartPath)
		if err != nil {
			return err
		}
		b.logger.Infof(msg)
		rel, err := installChart(cf, ch, releaseName, nil, b.logger)
		if err != nil {
			return err
		}
		b.logger.Infof("Installed '%s' to namespace '%s'", rel.Name, rel.Namespace)
		return nil
	}

	// CRDs must exist before the fleet chart's custom resources can apply.
	if err := install("fleet-crd", "fleet-crd", "Installing fleet crds"); err != nil {
		return err
	}
	if err := install("fleet", "fleet", "Installing fleet"); err != nil {
		return err
	}
	b.logger.Successf("Completed post boot")
	return nil
}
//TODO: Move* will actually just copy. This is more expensive, but is much safer/easier at handling deep merges, should this change?
// moveBin copies the package's bin directory into the fixed host path
// /opt/hauler/bin, creating it if necessary.
func (b booter) moveBin() error {
path := filepath.Join("/opt/hauler/bin")
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return err
}
return copy.Copy(b.fs.Bin().Path(), path)
}
// moveImages writes all package image refs into a single hauler.tar archive
// under the driver's agent/images directory.
func (b booter) moveImages(d driver.Driver) error {
//NOTE: archives are not recursively searched, this _must_ be at the images dir
path := d.DataPath("agent/images")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
refs, err := b.fs.MapLayout()
if err != nil {
return err
}
return tarball.MultiRefWriteToFile(filepath.Join(path, "hauler.tar"), refs)
}
// moveBundles copies the package's bundle manifests into the driver's
// auto-applied server manifests directory.
func (b booter) moveBundles(d driver.Driver) error {
path := d.DataPath("server/manifests/hauler")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
return copy.Copy(b.fs.Bundle().Path(), path)
}
// moveCharts copies the package's charts into the driver's static charts
// directory.
func (b booter) moveCharts(d driver.Driver) error {
path := d.DataPath("server/static/charts/hauler")
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
return copy.Copy(b.fs.Chart().Path(), path)
}

View File

@@ -1,29 +0,0 @@
package bootstrap
import (
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// BootSettings holds the namespace and kubeconfig used for helm/kube access
// during bootstrap.
type BootSettings struct {
// config exposes Namespace/KubeConfig as generic cli config flags.
config *genericclioptions.ConfigFlags
Namespace string
KubeConfig string
}
// NewBootConfig builds BootSettings for the given namespace and kubeconfig
// path, wiring both into the embedded ConfigFlags.
func NewBootConfig(ns, kubepath string) *BootSettings {
env := &BootSettings{
Namespace: ns,
KubeConfig: kubepath,
}
env.config = &genericclioptions.ConfigFlags{
Namespace: &env.Namespace,
KubeConfig: &env.KubeConfig,
}
return env
}
// RESTClientGetter gets the kubeconfig from BootSettings
func (s *BootSettings) RESTClientGetter() genericclioptions.RESTClientGetter {
return s.config
}

View File

@@ -1,20 +0,0 @@
package bootstrap
import (
"testing"
)
// TestBootSettings verifies NewBootConfig wires the namespace and kubeconfig
// path through to the returned settings.
func TestBootSettings(t *testing.T) {
	const (
		wantNS   = "test"
		wantPath = "somepath"
	)
	got := NewBootConfig(wantNS, wantPath)
	if got.Namespace != wantNS {
		t.Errorf("expected namespace %q, got %q", wantNS, got.Namespace)
	}
	if got.KubeConfig != wantPath {
		t.Errorf("expected kube-config %q, got %q", wantPath, got.KubeConfig)
	}
}

View File

@@ -1,63 +0,0 @@
package bootstrap
import (
"context"
"errors"
"github.com/rancherfederal/hauler/pkg/driver"
"github.com/rancherfederal/hauler/pkg/kube"
"github.com/rancherfederal/hauler/pkg/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/release"
"os"
"time"
)
// waitForDriver blocks until the driver's kubeconfig file appears (up to 2
// minutes), then waits for the driver's system objects to reach their ready
// condition.
func waitForDriver(ctx context.Context, d driver.Driver) error {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
//TODO: This is a janky way of waiting for file to exist
for {
_, err := os.Stat(d.KubeConfigPath())
if err == nil {
break
}
if ctx.Err() == context.DeadlineExceeded {
return errors.New("timed out waiting for driver to provision")
}
time.Sleep(1 * time.Second)
}
cfg, err := kube.NewKubeConfig()
if err != nil {
return err
}
// Poll every 5s, for at most 5 minutes, for system objects to be ready.
sc, err := kube.NewStatusChecker(cfg, 5*time.Second, 5*time.Minute)
if err != nil {
return err
}
return sc.WaitForCondition(d.SystemObjects()...)
}
//TODO: This is likely way too fleet specific
// installChart installs the given chart as releaseName into cf.Namespace,
// creating the namespace if needed and waiting for resources to settle.
// Helm debug output is routed through the provided logger.
func installChart(cf *BootSettings, chart *chart.Chart, releaseName string, vals map[string]interface{}, logger log.Logger) (*release.Release, error) {
actionConfig := new(action.Configuration)
if err := actionConfig.Init(cf.RESTClientGetter(), cf.Namespace, os.Getenv("HELM_DRIVER"), logger.Debugf); err != nil {
return nil, err
}
client := action.NewInstall(actionConfig)
client.ReleaseName = releaseName
client.CreateNamespace = true
client.Wait = true
//TODO: Do this better
client.Namespace = cf.Namespace
return client.Run(chart, vals)
}

View File

@@ -0,0 +1,107 @@
package chart
import (
"helm.sh/helm/v3/pkg/action"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/image"
"github.com/rancherfederal/hauler/pkg/content/chart"
"github.com/rancherfederal/hauler/pkg/reference"
)
var _ artifacts.OCICollection = (*tchart)(nil)
// tchart is a thick chart that includes all the dependent images as well as the chart itself
type tchart struct {
chart *chart.Chart
config v1alpha1.ThickChart
// computed guards against re-resolving contents on repeated Contents calls.
computed bool
contents map[string]artifacts.OCI
}
// NewThickChart resolves the chart named in cfg using the given chart path
// options and returns a collection of the chart plus its dependent images.
func NewThickChart(cfg v1alpha1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
o, err := chart.NewChart(cfg.Chart.Name, opts)
if err != nil {
return nil, err
}
return &tchart{
chart: o,
config: cfg,
contents: make(map[string]artifacts.OCI),
}, nil
}
// Contents computes (once) and returns the collection's artifacts keyed by
// reference.
func (c *tchart) Contents() (map[string]artifacts.OCI, error) {
if err := c.compute(); err != nil {
return nil, err
}
return c.contents, nil
}
// compute populates the collection contents exactly once; later calls are
// no-ops.
func (c *tchart) compute() error {
	if c.computed {
		return nil
	}
	// Resolve chart-referenced images, the chart itself, and any extra
	// images, in the same order as before.
	for _, step := range []func() error{
		c.dependentImages,
		c.chartContents,
		c.extraImages,
	} {
		if err := step(); err != nil {
			return err
		}
	}
	c.computed = true
	return nil
}
// chartContents adds the chart itself to the collection, keyed by a
// name:version tagged reference built from the chart metadata.
func (c *tchart) chartContents() error {
ch, err := c.chart.Load()
if err != nil {
return err
}
ref, err := reference.NewTagged(ch.Name(), ch.Metadata.Version)
if err != nil {
return err
}
c.contents[ref.Name()] = c.chart
return nil
}
// dependentImages renders the chart, extracts the image references found in
// its workloads, and adds each as an image artifact keyed by its reference.
func (c *tchart) dependentImages() error {
ch, err := c.chart.Load()
if err != nil {
return err
}
imgs, err := ImagesInChart(ch)
if err != nil {
return err
}
for _, img := range imgs.Spec.Images {
i, err := image.NewImage(img.Name)
if err != nil {
return err
}
c.contents[img.Name] = i
}
return nil
}
// extraImages adds the user-configured ExtraImages from the ThickChart spec
// to the collection, keyed by their references.
func (c *tchart) extraImages() error {
for _, img := range c.config.ExtraImages {
i, err := image.NewImage(img.Reference)
if err != nil {
return err
}
c.contents[img.Reference] = i
}
return nil
}

View File

@@ -0,0 +1,129 @@
package chart
import (
"bufio"
"bytes"
"io"
"strings"
"helm.sh/helm/v3/pkg/action"
helmchart "helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/kube/fake"
"helm.sh/helm/v3/pkg/storage"
"helm.sh/helm/v3/pkg/storage/driver"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/util/jsonpath"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)
// defaultKnownImagePaths are JSONPath expressions used to pluck container
// image references out of rendered workload manifests.
var defaultKnownImagePaths = []string{
// Deployments & DaemonSets
"{.spec.template.spec.initContainers[*].image}",
"{.spec.template.spec.containers[*].image}",
// Pods
"{.spec.initContainers[*].image}",
"{.spec.containers[*].image}",
}
// ImagesInChart will render a chart and identify all dependent images from it
//
// Detection is best-effort: the chart is templated with empty values and each
// rendered YAML document is scanned with the default known image JSONPaths.
func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
docs, err := template(c)
if err != nil {
return v1alpha1.Images{}, err
}
var images []v1alpha1.Image
// Split the rendered multi-document manifest stream into single documents.
reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs)))
for {
raw, err := reader.Read()
if err == io.EOF {
break
}
if err != nil {
return v1alpha1.Images{}, err
}
found := find(raw, defaultKnownImagePaths...)
for _, f := range found {
images = append(images, v1alpha1.Image{Name: f})
}
}
ims := v1alpha1.Images{
Spec: v1alpha1.ImageSpec{
Images: images,
},
}
return ims, nil
}
// template dry-run renders the chart client-side (no cluster access, memory
// release storage) and returns the combined rendered manifest text.
func template(c *helmchart.Chart) (string, error) {
s := storage.Init(driver.NewMemory())
templateCfg := &action.Configuration{
RESTClientGetter: nil,
Releases: s,
KubeClient: &fake.PrintingKubeClient{Out: io.Discard},
Capabilities: chartutil.DefaultCapabilities,
Log: func(format string, v ...interface{}) {},
}
// TODO: Do we need values if we're claiming this is best effort image detection?
// Justification being: if users are relying on us to get images from their values, they could just add images to the []ImagesInChart spec of the Store api
vals := make(map[string]interface{})
client := action.NewInstall(templateCfg)
client.ReleaseName = "dry"
client.DryRun = true
client.Replace = true
client.ClientOnly = true
client.IncludeCRDs = true
release, err := client.Run(c, vals)
if err != nil {
return "", err
}
return release.Manifest, nil
}
// find evaluates the given JSONPath expressions against a single YAML
// document and returns every match. Unparseable documents and failing paths
// are skipped silently — detection is deliberately best-effort.
func find(data []byte, paths ...string) []string {
var (
pathMatches []string
obj interface{}
)
if err := yaml.Unmarshal(data, &obj); err != nil {
return nil
}
j := jsonpath.New("")
j.AllowMissingKeys(true)
for _, p := range paths {
r, err := parseJSONPath(obj, j, p)
if err != nil {
continue
}
pathMatches = append(pathMatches, r...)
}
return pathMatches
}
// parseJSONPath parses template into the (shared, reused) parser, executes it
// against data, and splits the space-joined output into individual matches.
func parseJSONPath(data interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
buf := new(bytes.Buffer)
if err := parser.Parse(template); err != nil {
return nil, err
}
if err := parser.Execute(buf, data); err != nil {
return nil, err
}
// JSONPath joins multiple matches with spaces; split them back apart.
f := func(s rune) bool { return s == ' ' }
r := strings.FieldsFunc(buf.String(), f)
return r, nil
}

View File

@@ -0,0 +1,232 @@
package imagetxt
import (
"bufio"
"context"
"fmt"
"io"
"os"
"strings"
"sync"
"github.com/google/go-containerregistry/pkg/name"
artifact "github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
"github.com/rancherfederal/hauler/pkg/artifacts/image"
"github.com/rancherfederal/hauler/pkg/log"
)
// ImageTxt is an OCICollection built from an images.txt-style file: one image
// reference per line, optionally followed by a comma-separated source list.
type ImageTxt struct {
// Ref locates the txt file (local path or URL fetched via the getter client).
Ref string
// IncludeSources/ExcludeSources filter which sourced entries are pulled.
IncludeSources map[string]bool
ExcludeSources map[string]bool
// lock serializes Contents/compute.
lock *sync.Mutex
client *getter.Client
computed bool
contents map[string]artifact.OCI
}
var _ artifact.OCICollection = (*ImageTxt)(nil)
// Option configures an ImageTxt during New.
type Option interface {
Apply(*ImageTxt) error
}
// withIncludeSources adds each listed source to IncludeSources.
type withIncludeSources []string
func (o withIncludeSources) Apply(it *ImageTxt) error {
if it.IncludeSources == nil {
it.IncludeSources = make(map[string]bool)
}
for _, s := range o {
it.IncludeSources[s] = true
}
return nil
}
// WithIncludeSources restricts pulling to entries tagged with any of the
// given sources.
func WithIncludeSources(include ...string) Option {
return withIncludeSources(include)
}
// withExcludeSources adds each listed source to ExcludeSources.
type withExcludeSources []string
func (o withExcludeSources) Apply(it *ImageTxt) error {
if it.ExcludeSources == nil {
it.ExcludeSources = make(map[string]bool)
}
for _, s := range o {
it.ExcludeSources[s] = true
}
return nil
}
// WithExcludeSources skips entries tagged with any of the given sources.
func WithExcludeSources(exclude ...string) Option {
return withExcludeSources(exclude)
}
// New builds an ImageTxt for ref and applies the given options, returning an
// error identifying the first option that fails.
func New(ref string, opts ...Option) (*ImageTxt, error) {
it := &ImageTxt{
Ref: ref,
client: getter.NewClient(getter.ClientOptions{}),
lock: &sync.Mutex{},
}
for i, o := range opts {
if err := o.Apply(it); err != nil {
return nil, fmt.Errorf("invalid option %d: %v", i, err)
}
}
return it, nil
}
// Contents computes (once, under the lock) and returns the image artifacts
// selected from the txt file, keyed by reference.
func (it *ImageTxt) Contents() (map[string]artifact.OCI, error) {
it.lock.Lock()
defer it.lock.Unlock()
if !it.computed {
if err := it.compute(); err != nil {
return nil, fmt.Errorf("compute OCI layout: %v", err)
}
it.computed = true
}
return it.contents, nil
}
// compute fetches and parses the txt file, decides which sources to pull
// (all entries when the file has no sources or no filters are set; otherwise
// include-filtered, falling back to exclude-filtered), and registers an image
// artifact for each selected entry.
func (it *ImageTxt) compute() error {
// TODO - pass in logger from context
l := log.NewLogger(os.Stdout)
it.contents = make(map[string]artifact.OCI)
ctx := context.TODO()
rc, err := it.client.ContentFrom(ctx, it.Ref)
if err != nil {
return fmt.Errorf("fetch image.txt ref %s: %w", it.Ref, err)
}
defer rc.Close()
entries, err := splitImagesTxt(rc)
if err != nil {
return fmt.Errorf("parse image.txt ref %s: %v", it.Ref, err)
}
// Collect the union of all sources seen across entries.
foundSources := make(map[string]bool)
for _, e := range entries {
for s := range e.Sources {
foundSources[s] = true
}
}
var pullAll bool
targetSources := make(map[string]bool)
if len(foundSources) == 0 || (len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0) {
// pull all found images
pullAll = true
if len(foundSources) == 0 {
l.Infof("image txt file appears to have no sources; pulling all found images")
if len(it.IncludeSources) != 0 || len(it.ExcludeSources) != 0 {
l.Warnf("ImageTxt provided include or exclude sources; ignoring")
}
} else if len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0 {
l.Infof("image-sources txt file not filtered; pulling all found images")
}
} else {
// determine sources to pull
if len(it.IncludeSources) != 0 && len(it.ExcludeSources) != 0 {
l.Warnf("ImageTxt provided include and exclude sources; using only include sources")
}
if len(it.IncludeSources) != 0 {
targetSources = it.IncludeSources
} else {
// Exclude mode: start from every found source, then remove exclusions.
for s := range foundSources {
targetSources[s] = true
}
for s := range it.ExcludeSources {
delete(targetSources, s)
}
}
var targetSourcesArr []string
for s := range targetSources {
targetSourcesArr = append(targetSourcesArr, s)
}
l.Infof("pulling images covering sources %s", strings.Join(targetSourcesArr, ", "))
}
for _, e := range entries {
var matchesSourceFilter bool
if pullAll {
l.Infof("pulling image %s", e.Reference)
} else {
for s := range e.Sources {
if targetSources[s] {
matchesSourceFilter = true
l.Infof("pulling image %s (matched source %s)", e.Reference, s)
break
}
}
}
if pullAll || matchesSourceFilter {
curImage, err := image.NewImage(e.Reference.String())
if err != nil {
return fmt.Errorf("pull image %s: %v", e.Reference, err)
}
it.contents[e.Reference.String()] = curImage
}
}
return nil
}
// imageTxtEntry is one parsed line of an images.txt file: an image reference
// plus the (possibly empty) set of sources it belongs to.
type imageTxtEntry struct {
Reference name.Reference
Sources map[string]bool
}
// splitImagesTxt parses images.txt content: one image reference per line,
// optionally followed by a single space and a comma-separated source list.
// Blank lines and lines starting with '#' are skipped.
func splitImagesTxt(r io.Reader) ([]imageTxtEntry, error) {
var entries []imageTxtEntry
scanner := bufio.NewScanner(r)
for scanner.Scan() {
curEntry := imageTxtEntry{
Sources: make(map[string]bool),
}
lineContent := scanner.Text()
if lineContent == "" || strings.HasPrefix(lineContent, "#") {
// skip past empty and commented lines
continue
}
splitContent := strings.Split(lineContent, " ")
if len(splitContent) > 2 {
return nil, fmt.Errorf(
"invalid image.txt format: must contain only an image reference and sources separated by space; invalid line: %q",
lineContent)
}
curRef, err := name.ParseReference(splitContent[0])
if err != nil {
return nil, fmt.Errorf("invalid reference %s: %v", splitContent[0], err)
}
curEntry.Reference = curRef
if len(splitContent) == 2 {
for _, source := range strings.Split(splitContent[1], ",") {
curEntry.Sources[source] = true
}
}
entries = append(entries, curEntry)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("scan contents: %v", err)
}
return entries, nil
}

View File

@@ -0,0 +1,209 @@
package imagetxt
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/image"
)
// Sentinel errors returned by checkImages for the three failure modes.
var (
ErrRefNotFound = errors.New("ref not found")
ErrRefNotImage = errors.New("ref is not image")
ErrExtraRefsFound = errors.New("extra refs found in contents")
)
// testServer serves ./testdata/http/ for the http-ref test cases.
var (
testServer *httptest.Server
)
func TestMain(m *testing.M) {
setup()
code := m.Run()
teardown()
os.Exit(code)
}
// setup starts a file server over the http testdata directory.
func setup() {
dir := http.Dir("./testdata/http/")
h := http.FileServer(dir)
testServer = httptest.NewServer(h)
}
// teardown stops the test file server if it was started.
func teardown() {
if testServer != nil {
testServer.Close()
}
}
// failKind names the stage of a test case where a failure is expected.
type failKind string
const (
failKindNew = failKind("New")
failKindContents = failKind("Contents")
)
// checkError builds a checker bound to one stage: it fails the test when an
// error appears at a stage the case did not expect, or when the expected
// error at this stage did not appear.
func checkError(checkedFailKind failKind) func(*testing.T, error, bool, failKind) {
return func(cet *testing.T, err error, testShouldFail bool, testFailKind failKind) {
if err != nil {
// if error should not have happened at all OR error should have happened
// at a different point, test failed
if !testShouldFail || testFailKind != checkedFailKind {
cet.Fatalf("unexpected error at %s: %v", checkedFailKind, err)
}
// test should fail at this point, test passed
return
}
// if no error occurred but error should have happened at this point, test
// failed
if testShouldFail && testFailKind == checkedFailKind {
cet.Fatalf("unexpected nil error at %s", checkedFailKind)
}
}
}
// TestImageTxtCollection exercises ImageTxt end to end over http and local
// file refs, covering no filters, include filters, and exclude filters, and
// verifies the exact set of image refs returned by Contents.
func TestImageTxtCollection(t *testing.T) {
type testEntry struct {
Name string
Ref string
IncludeSources []string
ExcludeSources []string
ExpectedImages []string
ShouldFail bool
FailKind failKind
}
tt := []testEntry{
{
Name: "http ref basic",
Ref: fmt.Sprintf("%s/images-http.txt", testServer.URL),
ExpectedImages: []string{
"busybox",
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
"quay.io/jetstack/cert-manager-controller:v1.6.1",
},
},
{
Name: "http ref sources format pull all",
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
ExpectedImages: []string{
"busybox",
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
"quay.io/jetstack/cert-manager-controller:v1.6.1",
},
},
{
Name: "http ref sources format include sources A",
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
IncludeSources: []string{
"core", "rke",
},
ExpectedImages: []string{
"busybox",
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
},
},
{
Name: "http ref sources format include sources B",
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
IncludeSources: []string{
"nginx", "rancher", "cert-manager",
},
ExpectedImages: []string{
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
"quay.io/jetstack/cert-manager-controller:v1.6.1",
},
},
{
Name: "http ref sources format exclude sources A",
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
ExcludeSources: []string{
"cert-manager",
},
ExpectedImages: []string{
"busybox",
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
},
},
{
Name: "http ref sources format exclude sources B",
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
ExcludeSources: []string{
"core",
},
ExpectedImages: []string{
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
"quay.io/jetstack/cert-manager-controller:v1.6.1",
},
},
{
Name: "local file ref",
Ref: "./testdata/images-file.txt",
ExpectedImages: []string{
"busybox",
"nginx:1.19",
"rancher/hyperkube:v1.21.7-rancher1",
"docker.io/rancher/klipper-lb:v0.3.4",
"quay.io/jetstack/cert-manager-controller:v1.6.1",
},
},
}
checkErrorNew := checkError(failKindNew)
checkErrorContents := checkError(failKindContents)
for _, curTest := range tt {
t.Run(curTest.Name, func(innerT *testing.T) {
curImageTxt, err := New(curTest.Ref,
WithIncludeSources(curTest.IncludeSources...),
WithExcludeSources(curTest.ExcludeSources...),
)
checkErrorNew(innerT, err, curTest.ShouldFail, curTest.FailKind)
ociContents, err := curImageTxt.Contents()
checkErrorContents(innerT, err, curTest.ShouldFail, curTest.FailKind)
if err := checkImages(ociContents, curTest.ExpectedImages); err != nil {
innerT.Fatal(err)
}
})
}
}
// checkImages verifies that content holds exactly the given refs, each backed
// by an *image.Image, and reports which check failed via sentinel errors.
func checkImages(content map[string]artifacts.OCI, refs []string) error {
	leftover := make(map[string]artifacts.OCI, len(content))
	for key, oci := range content {
		leftover[key] = oci
	}
	for _, ref := range refs {
		target, found := content[ref]
		if !found {
			return fmt.Errorf("ref %s: %w", ref, ErrRefNotFound)
		}
		if _, isImage := target.(*image.Image); !isImage {
			return fmt.Errorf("got underlying type %T: %w", target, ErrRefNotImage)
		}
		delete(leftover, ref)
	}
	if len(leftover) > 0 {
		return ErrExtraRefsFound
	}
	return nil
}

View File

@@ -0,0 +1,5 @@
busybox
nginx:1.19
rancher/hyperkube:v1.21.7-rancher1
docker.io/rancher/klipper-lb:v0.3.4
quay.io/jetstack/cert-manager-controller:v1.6.1

View File

@@ -0,0 +1,5 @@
busybox core
nginx:1.19 core,nginx
rancher/hyperkube:v1.21.7-rancher1 rancher,rke
docker.io/rancher/klipper-lb:v0.3.4 rancher,k3s
quay.io/jetstack/cert-manager-controller:v1.6.1 cert-manager

View File

@@ -0,0 +1,5 @@
busybox
nginx:1.19
rancher/hyperkube:v1.21.7-rancher1
docker.io/rancher/klipper-lb:v0.3.4
quay.io/jetstack/cert-manager-controller:v1.6.1

175
pkg/collection/k3s/k3s.go Normal file
View File

@@ -0,0 +1,175 @@
package k3s
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/artifacts/file"
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
"github.com/rancherfederal/hauler/pkg/artifacts/image"
"github.com/rancherfederal/hauler/pkg/reference"
)
var _ artifacts.OCICollection = (*k3s)(nil)
const (
// releaseUrl is the base URL for k3s GitHub release assets.
releaseUrl = "https://github.com/k3s-io/k3s/releases/download"
// channelUrl lists k3s release channels (stable, latest, ...).
channelUrl = "https://update.k3s.io/v1-release/channels"
// bootstrapUrl is the upstream k3s install script.
bootstrapUrl = "https://get.k3s.io"
)
var (
ErrImagesNotFound = errors.New("k3s dependent images not found")
ErrFetchingImages = errors.New("failed to fetch k3s dependent images")
ErrExecutableNotfound = errors.New("k3s executable not found")
ErrChannelNotFound = errors.New("desired k3s channel not found")
)
// k3s collects the k3s executable, install script, and dependent images for
// a given version (or channel name, resolved to a version during compute).
type k3s struct {
version string
// arch selects the release asset; empty or "amd64" maps to the plain "k3s" binary.
arch string
computed bool
contents map[string]artifacts.OCI
// channels maps channel name -> latest version, populated by fetchChannels.
channels map[string]string
client *getter.Client
}
// NewK3s returns a collection for the given k3s version or channel name.
func NewK3s(version string) (artifacts.OCICollection, error) {
return &k3s{
version: version,
contents: make(map[string]artifacts.OCI),
}, nil
}
// Contents computes (once) and returns the k3s artifacts keyed by reference.
func (k *k3s) Contents() (map[string]artifacts.OCI, error) {
if err := k.compute(); err != nil {
return nil, err
}
return k.contents, nil
}
// compute resolves a channel name to a concrete version (best effort — a
// channel fetch failure is deliberately ignored), then gathers the dependent
// images, the executable, and the bootstrap script, exactly once.
func (k *k3s) compute() error {
if k.computed {
return nil
}
// Channel resolution is optional: on success, a matching channel name
// (e.g. "stable") is replaced with its latest version.
if err := k.fetchChannels(); err == nil {
if version, ok := k.channels[k.version]; ok {
k.version = version
}
}
if err := k.images(); err != nil {
return err
}
if err := k.executable(); err != nil {
return err
}
if err := k.bootstrap(); err != nil {
return err
}
k.computed = true
return nil
}
// executable verifies the k3s binary release asset exists for the configured
// version/arch and registers it as a file artifact keyed by a DNS-compliant
// versioned reference.
func (k *k3s) executable() error {
	// Release assets are named "k3s" for amd64 and "k3s-<arch>" otherwise.
	n := "k3s"
	if k.arch != "" && k.arch != "amd64" {
		// BUG FIX: previously formatted as "name-%s", which never matches a
		// real k3s release asset name.
		n = fmt.Sprintf("k3s-%s", k.arch)
	}
	fref := k.releaseUrl(n)
	// BUG FIX: check the error before touching resp; on a transport error
	// resp is nil and resp.StatusCode would panic.
	resp, err := http.Head(fref)
	if err != nil {
		return ErrExecutableNotfound
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ErrExecutableNotfound
	}
	f := file.NewFile(fref)
	ref := fmt.Sprintf("%s/k3s:%s", reference.DefaultNamespace, k.dnsCompliantVersion())
	k.contents[ref] = f
	return nil
}
// bootstrap registers the upstream k3s install script as a file artifact
// named k3s-init.sh under the default namespace/tag.
func (k *k3s) bootstrap() error {
c := getter.NewClient(getter.ClientOptions{NameOverride: "k3s-init.sh"})
f := file.NewFile(bootstrapUrl, file.WithClient(c))
ref := fmt.Sprintf("%s/k3s-init.sh:%s", reference.DefaultNamespace, reference.DefaultTag)
k.contents[ref] = f
return nil
}
// images fetches the k3s-images.txt release asset and registers each listed
// image reference as an image artifact in the collection contents.
func (k *k3s) images() error {
	resp, err := http.Get(k.releaseUrl("k3s-images.txt"))
	// BUG FIX: the error must be checked before resp.StatusCode; resp is nil
	// when err is non-nil and dereferencing it would panic.
	if err != nil {
		return ErrImagesNotFound
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ErrFetchingImages
	}
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		// Renamed from "reference", which shadowed the reference package.
		ref := scanner.Text()
		o, err := image.NewImage(ref)
		if err != nil {
			return err
		}
		k.contents[ref] = o
	}
	// FIX: surface read errors that terminated the scan early.
	return scanner.Err()
}
// releaseUrl joins the release base URL, the configured version, and the
// artifact file name into a download URL.
func (k *k3s) releaseUrl(artifact string) string {
u, _ := url.Parse(releaseUrl)
complete := []string{u.Path}
u.Path = path.Join(append(complete, []string{k.version, artifact}...)...)
return u.String()
}
// dnsCompliantVersion makes the version usable as an image tag by replacing
// '+' (e.g. v1.25.0+k3s1) with '-'.
func (k *k3s) dnsCompliantVersion() string {
return strings.ReplaceAll(k.version, "+", "-")
}
// fetchChannels downloads the k3s channel listing and stores a
// channel-name -> latest-version map on the receiver.
func (k *k3s) fetchChannels() error {
	resp, err := http.Get(channelUrl)
	if err != nil {
		return err
	}
	// FIX: the response body was previously never closed, leaking the
	// connection on every call.
	defer resp.Body.Close()
	var c channel
	if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
		return err
	}
	channels := make(map[string]string, len(c.Data))
	for _, ch := range c.Data {
		channels[ch.Name] = ch.Latest
	}
	k.channels = channels
	return nil
}
// channel mirrors the JSON shape of the k3s channel listing endpoint.
type channel struct {
Data []channelData `json:"data"`
}
// channelData is one channel entry, mapping a channel name to its latest version.
type channelData struct {
ID string `json:"id"`
Name string `json:"name"`
Latest string `json:"latest"`
}

57
pkg/consts/consts.go Normal file
View File

@@ -0,0 +1,57 @@
package consts
const (
// Manifest, config, and layer media types for OCI and Docker images.
OCIManifestSchema1 = "application/vnd.oci.image.manifest.v1+json"
DockerManifestSchema2 = "application/vnd.docker.distribution.manifest.v2+json"
DockerManifestListSchema2 = "application/vnd.docker.distribution.manifest.list.v2+json"
OCIImageIndexSchema = "application/vnd.oci.image.index.v1+json"
DockerConfigJSON = "application/vnd.docker.container.image.v1+json"
DockerLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
DockerForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
DockerUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
OCILayer = "application/vnd.oci.image.layer.v1.tar+gzip"
OCIArtifact = "application/vnd.oci.empty.v1+json"
// ChartConfigMediaType is the reserved media type for the Helm chart manifest config
ChartConfigMediaType = "application/vnd.cncf.helm.config.v1+json"
// ChartLayerMediaType is the reserved media type for Helm chart package content
ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
// ProvLayerMediaType is the reserved media type for Helm chart provenance files
ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
// FileLayerMediaType is the reserved media type for File content layers
FileLayerMediaType = "application/vnd.content.hauler.file.layer.v1"
// FileLocalConfigMediaType is the reserved media type for File config
FileLocalConfigMediaType = "application/vnd.content.hauler.file.local.config.v1+json"
FileDirectoryConfigMediaType = "application/vnd.content.hauler.file.directory.config.v1+json"
FileHttpConfigMediaType = "application/vnd.content.hauler.file.http.config.v1+json"
// MemoryConfigMediaType is the reserved media type for Memory config for a generic set of bytes stored in memory
MemoryConfigMediaType = "application/vnd.content.hauler.memory.config.v1+json"
// WasmArtifactLayerMediaType is the reserved media type for WASM artifact layers
WasmArtifactLayerMediaType = "application/vnd.wasm.content.layer.v1+wasm"
// WasmConfigMediaType is the reserved media type for WASM configs
WasmConfigMediaType = "application/vnd.wasm.config.v1+json"
// Fallback media types for content hauler cannot otherwise classify.
UnknownManifest = "application/vnd.hauler.cattle.io.unknown.v1+json"
UnknownLayer = "application/vnd.content.hauler.unknown.layer"
// Vendor prefixes used to classify media types by origin.
OCIVendorPrefix = "vnd.oci"
DockerVendorPrefix = "vnd.docker"
HaulerVendorPrefix = "vnd.hauler"
OCIImageIndexFile = "index.json"
// Annotation keys used on stored artifacts.
KindAnnotationName = "kind"
KindAnnotation = "dev.cosignproject.cosign/image"
CarbideRegistry = "rgcrprod.azurecr.us"
ImageAnnotationKey = "hauler.dev/key"
ImageAnnotationPlatform = "hauler.dev/platform"
ImageAnnotationRegistry = "hauler.dev/registry"
)

283
pkg/content/chart/chart.go Normal file
View File

@@ -0,0 +1,283 @@
package chart
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/fs"
"net/url"
"os"
"path/filepath"
gv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rancherfederal/hauler/pkg/artifacts"
"github.com/rancherfederal/hauler/pkg/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/registry"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/layer"
)
var (
_ artifacts.OCI = (*Chart)(nil)
// settings carries the ambient helm CLI environment used to locate charts.
settings = cli.New()
)
// Chart implements the OCI interface for Chart API objects. API spec values are
// stored into the Repo, Name, and Version fields.
type Chart struct {
// path is the located chart on disk: either a packaged .tgz or a directory.
path string
annotations map[string]string
}
// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on v1alpha1.Chart contents
//
// It locates the chart via helm's ChartPathOptions (handling OCI registries,
// plain repo URLs, and repo/name shorthand) and wraps the resolved path.
func NewChart(name string, opts *action.ChartPathOptions) (*Chart, error) {
chartRef := name
actionConfig := new(action.Configuration)
if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER"), log.NewLogger(os.Stdout).Debugf); err != nil {
return nil, err
}
client := action.NewInstall(actionConfig)
client.ChartPathOptions.Version = opts.Version
registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
client.InsecureSkipTLSverify, client.PlainHTTP)
if err != nil {
return nil, fmt.Errorf("missing registry client: %w", err)
}
client.SetRegistryClient(registryClient)
if registry.IsOCI(opts.RepoURL) {
chartRef = opts.RepoURL + "/" + name
} else if isUrl(opts.RepoURL) { // OCI Protocol registers as a valid URL
client.ChartPathOptions.RepoURL = opts.RepoURL
} else { // Handles cases like grafana/loki
chartRef = opts.RepoURL + "/" + name
}
chartPath, err := client.ChartPathOptions.LocateChart(chartRef, settings)
if err != nil {
return nil, err
}
// err is nil here; returned for symmetry with the error path above.
return &Chart{
path: chartPath,
}, err
}
// MediaType returns the OCI manifest media type used for chart artifacts.
func (h *Chart) MediaType() string {
return consts.OCIManifestSchema1
}
// Manifest builds the chart's OCI manifest from its config descriptor and
// layer descriptors.
func (h *Chart) Manifest() (*gv1.Manifest, error) {
	cfgDesc, err := h.configDescriptor()
	if err != nil {
		return nil, err
	}
	ls, err := h.Layers()
	// BUG FIX: the error from Layers was previously ignored, which could
	// yield a manifest silently missing its layers.
	if err != nil {
		return nil, err
	}
	var layerDescs []gv1.Descriptor
	for _, l := range ls {
		desc, err := partial.Descriptor(l)
		if err != nil {
			return nil, err
		}
		layerDescs = append(layerDescs, *desc)
	}
	return &gv1.Manifest{
		SchemaVersion: 2,
		MediaType:     gtypes.MediaType(h.MediaType()),
		Config:        cfgDesc,
		Layers:        layerDescs,
		Annotations:   h.annotations,
	}, nil
}
// RawConfig loads the chart and returns its metadata serialized as JSON,
// used as the artifact's config content.
func (h *Chart) RawConfig() ([]byte, error) {
ch, err := loader.Load(h.path)
if err != nil {
return nil, err
}
return json.Marshal(ch.Metadata)
}
// configDescriptor builds the descriptor (media type, size, sha256 digest)
// for the chart's config content.
func (h *Chart) configDescriptor() (gv1.Descriptor, error) {
data, err := h.RawConfig()
if err != nil {
return gv1.Descriptor{}, err
}
hash, size, err := gv1.SHA256(bytes.NewBuffer(data))
if err != nil {
return gv1.Descriptor{}, err
}
return gv1.Descriptor{
MediaType: consts.ChartConfigMediaType,
Size: size,
Digest: hash,
}, nil
}
// Load parses the chart from disk into helm's chart representation.
func (h *Chart) Load() (*chart.Chart, error) {
return loader.Load(h.path)
}
// Layers returns the chart's single content layer (the packaged chart data).
func (h *Chart) Layers() ([]gv1.Layer, error) {
chartDataLayer, err := h.chartData()
if err != nil {
return nil, err
}
return []gv1.Layer{
chartDataLayer,
// TODO: Add provenance
}, nil
}
// RawChartData returns the chart file's bytes straight off disk.
func (h *Chart) RawChartData() ([]byte, error) {
return os.ReadFile(h.path)
}
// chartData loads the chart contents into memory and returns a NopCloser for the contents
//
// Normally we avoid loading into memory, but charts sizes are strictly capped at ~1MB
func (h *Chart) chartData() (gv1.Layer, error) {
	info, err := os.Stat(h.path)
	if err != nil {
		return nil, err
	}
	var chartdata []byte
	if info.IsDir() {
		// Unpacked chart directory: package it into an in-memory gzipped tarball.
		buf := &bytes.Buffer{}
		gw := gzip.NewWriter(buf)
		tw := tar.NewWriter(gw)
		if err := filepath.WalkDir(h.path, func(path string, d fs.DirEntry, err error) error {
			// BUG FIX: the walk error argument was previously ignored, so a
			// failed directory read produced a silently truncated archive.
			if err != nil {
				return err
			}
			fi, err := d.Info()
			if err != nil {
				return err
			}
			header, err := tar.FileInfoHeader(fi, fi.Name())
			if err != nil {
				return err
			}
			// Entries are named relative to the chart's parent directory so
			// the archive unpacks into a single top-level chart directory.
			rel, err := filepath.Rel(filepath.Dir(h.path), path)
			if err != nil {
				return err
			}
			header.Name = rel
			if err := tw.WriteHeader(header); err != nil {
				return err
			}
			if !d.IsDir() {
				data, err := os.Open(path)
				if err != nil {
					return err
				}
				// BUG FIX: close each opened file; the walk previously leaked
				// a file descriptor per regular file.
				if _, err := io.Copy(tw, data); err != nil {
					data.Close()
					return err
				}
				if err := data.Close(); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return nil, err
		}
		if err := tw.Close(); err != nil {
			return nil, err
		}
		if err := gw.Close(); err != nil {
			return nil, err
		}
		chartdata = buf.Bytes()
	} else {
		// Packaged .tgz chart: read it straight off disk.
		data, err := os.ReadFile(h.path)
		if err != nil {
			return nil, err
		}
		chartdata = data
	}
	annotations := make(map[string]string)
	annotations[ocispec.AnnotationTitle] = filepath.Base(h.path)
	opener := func() layer.Opener {
		return func() (io.ReadCloser, error) {
			return io.NopCloser(bytes.NewBuffer(chartdata)), nil
		}
	}
	chartDataLayer, err := layer.FromOpener(opener(),
		layer.WithMediaType(consts.ChartLayerMediaType),
		layer.WithAnnotations(annotations))
	return chartDataLayer, err
}
// isUrl reports whether name parses as an absolute request URI
// (e.g. "https://charts.example.com"); bare refs like "grafana/loki" do not.
func isUrl(name string) bool {
	if _, err := url.ParseRequestURI(name); err != nil {
		return false
	}
	return true
}
// newRegistryClient selects a TLS-configured client when any TLS option is
// supplied (client cert pair, CA file, or skip-verify), otherwise a default
// client that optionally speaks plain HTTP.
func newRegistryClient(certFile, keyFile, caFile string, insecureSkipTLSverify, plainHTTP bool) (*registry.Client, error) {
	// Same grouping as the original condition: (cert && key) || ca || skip.
	needsTLS := (certFile != "" && keyFile != "") || caFile != "" || insecureSkipTLSverify
	if needsTLS {
		return newRegistryClientWithTLS(certFile, keyFile, caFile, insecureSkipTLSverify)
	}
	return newDefaultRegistryClient(plainHTTP)
}
// newDefaultRegistryClient builds a registry client from the Helm CLI
// settings (debug flag, credentials file), with caching enabled and
// diagnostics written to stderr. plainHTTP disables TLS entirely.
func newDefaultRegistryClient(plainHTTP bool) (*registry.Client, error) {
	opts := []registry.ClientOption{
		registry.ClientOptDebug(settings.Debug),
		registry.ClientOptEnableCache(true),
		registry.ClientOptWriter(os.Stderr),
		registry.ClientOptCredentialsFile(settings.RegistryConfig),
	}
	if plainHTTP {
		opts = append(opts, registry.ClientOptPlainHTTP())
	}
	// Create a new registry client
	return registry.NewClient(opts...)
}
// newRegistryClientWithTLS builds a registry client with an explicit TLS
// configuration (client cert/key, CA bundle, optional verification skip),
// reusing the Helm CLI settings for credentials and debug output.
func newRegistryClientWithTLS(certFile, keyFile, caFile string, insecureSkipTLSverify bool) (*registry.Client, error) {
	// Create a new registry client
	registryClient, err := registry.NewRegistryClientWithTLS(os.Stderr, certFile, keyFile, caFile, insecureSkipTLSverify,
		settings.RegistryConfig, settings.Debug,
	)
	if err != nil {
		return nil, err
	}
	return registryClient, nil
}

View File

@@ -0,0 +1,107 @@
package chart_test
import (
"os"
"reflect"
"testing"
v1 "github.com/google/go-containerregistry/pkg/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"helm.sh/helm/v3/pkg/action"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/content/chart"
)
// TestNewChart exercises chart creation from a local archive and from a
// remote Helm repository, pinning the resulting layer descriptor.
func TestNewChart(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", "hauler")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	type args struct {
		name string
		opts *action.ChartPathOptions
	}
	tests := []struct {
		name    string
		args    args
		want    v1.Descriptor
		wantErr bool
	}{
		{
			name: "should create from a chart archive",
			args: args{
				name: "rancher-cluster-templates-0.4.4.tgz",
				opts: &action.ChartPathOptions{RepoURL: "../../../testdata"},
			},
			want: v1.Descriptor{
				MediaType: consts.ChartLayerMediaType,
				Size:      13102,
				Digest: v1.Hash{
					Algorithm: "sha256",
					Hex:       "4b3bb4e474b54bf9057b298f8f11c239bb561396716d8cd5fc369c407fba2965",
				},
				Annotations: map[string]string{
					ocispec.AnnotationTitle: "rancher-cluster-templates-0.4.4.tgz",
				},
			},
			wantErr: false,
		},
		// TODO: This isn't matching digests b/c of file timestamps not being respected
		// {
		// 	name: "should create from a chart directory",
		// 	args: args{
		// 		path: filepath.Join(tmpdir, "podinfo"),
		// 	},
		// 	want:    want,
		// 	wantErr: false,
		// },
		{
			// TODO: Use a mock helm server
			name: "should fetch a remote chart",
			args: args{
				name: "cert-manager",
				opts: &action.ChartPathOptions{RepoURL: "https://charts.jetstack.io", Version: "1.14.4"},
			},
			want: v1.Descriptor{
				MediaType: consts.ChartLayerMediaType,
				Size:      80674,
				Digest: v1.Hash{
					Algorithm: "sha256",
					Hex:       "5775fdbc1881d6e510df76d38753af54b86bd14caa8edb28fdbb79527042dede",
				},
				Annotations: map[string]string{
					ocispec.AnnotationTitle: "cert-manager-v1.14.4.tgz",
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := chart.NewChart(tt.args.name, tt.args.opts)
			if (err != nil) != tt.wantErr {
				// BUG FIX: error message previously referenced NewLocalChart.
				t.Errorf("NewChart() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			m, err := got.Manifest()
			if err != nil {
				// Fatal: the layer checks below are meaningless without a manifest.
				t.Fatal(err)
			}
			// TODO: This changes when we support provenance files
			// BUG FIX: the original only rejected >1 layers, then indexed
			// m.Layers[0] unconditionally — zero layers would panic.
			if len(m.Layers) != 1 {
				t.Fatalf("Expected 1 layer for chart, got %d", len(m.Layers))
			}
			desc := m.Layers[0]
			if !reflect.DeepEqual(desc, tt.want) {
				t.Errorf("got: %v\nwant: %v", desc, tt.want)
				return
			}
		})
	}
}

24
pkg/content/content.go Normal file
View File

@@ -0,0 +1,24 @@
package content
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
)
// Load decodes only the TypeMeta from raw YAML/JSON content and validates
// that it belongs to one of hauler's recognized content or collection
// API groups, returning the TypeMeta as an ObjectKind.
func Load(data []byte) (schema.ObjectKind, error) {
	var tm metav1.TypeMeta
	if err := yaml.Unmarshal(data, &tm); err != nil {
		return nil, err
	}
	gv := tm.GroupVersionKind().GroupVersion()
	if gv != v1alpha1.ContentGroupVersion && gv != v1alpha1.CollectionGroupVersion {
		return nil, fmt.Errorf("unrecognized content/collection type: %s", tm.GroupVersionKind().String())
	}
	return &tm, nil
}

313
pkg/content/oci.go Normal file
View File

@@ -0,0 +1,313 @@
package content
import (
"context"
"encoding/json"
"fmt"
"github.com/google/go-containerregistry/pkg/name"
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
ccontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/target"
"github.com/rancherfederal/hauler/pkg/consts"
"github.com/rancherfederal/hauler/pkg/reference"
)
// Interface guard: *OCI must satisfy the oras target.Target interface.
var _ target.Target = (*OCI)(nil)

// OCI is an on-disk OCI image layout rooted at root. Manifest descriptors
// are tracked in a concurrency-safe name map keyed by reference (+ kind).
type OCI struct {
	root    string
	index   *ocispec.Index
	nameMap *sync.Map // map[string]ocispec.Descriptor
}
// NewOCI returns an OCI layout handle rooted at root. The index is not read
// from disk here; LoadIndex does that lazily. The error return is currently
// always nil and kept for interface stability.
func NewOCI(root string) (*OCI, error) {
	o := &OCI{
		root:    root,
		nameMap: &sync.Map{},
	}
	return o, nil
}
// AddIndex adds a descriptor to the index and updates it
//
// The descriptor must use AnnotationRefName to identify itself
func (o *OCI) AddIndex(desc ocispec.Descriptor) error {
	if _, ok := desc.Annotations[ocispec.AnnotationRefName]; !ok {
		return fmt.Errorf("descriptor must contain a reference from the annotation: %s", ocispec.AnnotationRefName)
	}
	key, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
	if err != nil {
		return err
	}
	// Skip placeholder refs ("--"). Otherwise key the name map by
	// "<ref>-<kind>" so an image and its signature/attestation for the same
	// reference do not overwrite each other (fix for squashed ref keys).
	// NOTE(review): digest refs are keyed by repository only (key.Context()),
	// while tag refs use the full ref — confirm this asymmetry is intended.
	if strings.TrimSpace(key.String()) != "--" {
		switch key.(type) {
		case name.Digest:
			o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), desc.Annotations[consts.KindAnnotationName]), desc)
		case name.Tag:
			o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), desc.Annotations[consts.KindAnnotationName]), desc)
		}
	}
	return o.SaveIndex()
}
// LoadIndex will load the index from disk
//
// A missing index file is not an error: a fresh, empty v2 index is created
// in memory instead. Every manifest found is re-registered into the name
// map using the same "<ref>-<kind>" keying as AddIndex.
func (o *OCI) LoadIndex() error {
	path := o.path(consts.OCIImageIndexFile)
	idx, err := os.Open(path)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// No index yet — start with an empty one.
		o.index = &ocispec.Index{
			Versioned: specs.Versioned{
				SchemaVersion: 2,
			},
		}
		return nil
	}
	defer idx.Close()
	if err := json.NewDecoder(idx).Decode(&o.index); err != nil {
		return err
	}
	// Rebuild the in-memory name map from the on-disk manifest list.
	// NOTE(review): this duplicates the keying logic in AddIndex — keep the
	// two in sync (or extract a helper) when changing either.
	for _, desc := range o.index.Manifests {
		key, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
		if err != nil {
			return err
		}
		if strings.TrimSpace(key.String()) != "--" {
			switch key.(type) {
			case name.Digest:
				o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), desc.Annotations[consts.KindAnnotationName]), desc)
			case name.Tag:
				o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), desc.Annotations[consts.KindAnnotationName]), desc)
			}
		}
	}
	return nil
}
// SaveIndex will update the index on disk
//
// The manifest list is rebuilt from the name map, then ordered so image
// manifests precede their signatures/attestations before being written out.
func (o *OCI) SaveIndex() error {
	var descs []ocispec.Descriptor
	// NOTE(review): the callback's first parameter shadows the imported
	// "name" package inside this closure; the round-trip of the ref-name
	// annotation below is effectively a no-op kept for annotation-map init.
	o.nameMap.Range(func(name, desc interface{}) bool {
		n := desc.(ocispec.Descriptor).Annotations[ocispec.AnnotationRefName]
		d := desc.(ocispec.Descriptor)
		if d.Annotations == nil {
			d.Annotations = make(map[string]string)
		}
		d.Annotations[ocispec.AnnotationRefName] = n
		descs = append(descs, d)
		return true
	})
	// sort index to ensure that images come before any signatures and attestations.
	sort.SliceStable(descs, func(i, j int) bool {
		kindI := descs[i].Annotations["kind"]
		kindJ := descs[j].Annotations["kind"]
		// Objects with the prefix of "dev.cosignproject.cosign/image" should be at the top.
		if strings.HasPrefix(kindI, consts.KindAnnotation) && !strings.HasPrefix(kindJ, consts.KindAnnotation) {
			return true
		} else if !strings.HasPrefix(kindI, consts.KindAnnotation) && strings.HasPrefix(kindJ, consts.KindAnnotation) {
			return false
		}
		return false // Default: maintain the order.
	})
	o.index.Manifests = descs
	data, err := json.Marshal(o.index)
	if err != nil {
		return err
	}
	return os.WriteFile(o.path(consts.OCIImageIndexFile), data, 0644)
}
// Resolve attempts to resolve the reference into a name and descriptor.
//
// The argument `ref` should be a scheme-less URI representing the remote.
// Structurally, it has a host and path. The "host" can be used to directly
// reference a specific host or be matched against a specific handler.
//
// The returned name should be used to identify the referenced entity.
// Depending on the remote namespace, this may be immutable or mutable.
// While the name may differ from ref, it should itself be a valid ref.
//
// If the resolution fails, an error will be returned.
func (o *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	if err := o.LoadIndex(); err != nil {
		return "", ocispec.Descriptor{}, err
	}
	d, ok := o.nameMap.Load(ref)
	if !ok {
		// BUG FIX: previously returned a nil error here, so callers received
		// an empty descriptor with no indication that resolution failed.
		return "", ocispec.Descriptor{}, fmt.Errorf("reference %s not found in oci layout index", ref)
	}
	desc = d.(ocispec.Descriptor)
	return ref, desc, nil
}
// Fetcher returns a new fetcher for the provided reference.
// All content fetched from the returned fetcher will be
// from the namespace referred to by ref.
func (o *OCI) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if err := o.LoadIndex(); err != nil {
		return nil, err
	}
	// NOTE(review): an unknown ref yields (nil, nil) — callers must nil-check
	// the returned fetcher since no error is reported; confirm intentional.
	if _, ok := o.nameMap.Load(ref); !ok {
		return nil, nil
	}
	// The layout itself implements Fetch, so it acts as its own fetcher.
	return o, nil
}
// Fetch opens the blob file backing desc from the layout's blob store and
// returns it as a ReadCloser.
func (o *OCI) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	return o.blobReaderAt(desc)
}
// FetchManifest opens the blob file backing the manifest's config
// descriptor and returns it as a ReadCloser.
func (o *OCI) FetchManifest(ctx context.Context, manifest ocispec.Manifest) (io.ReadCloser, error) {
	return o.manifestBlobReaderAt(manifest)
}
// Pusher returns a new pusher for the provided reference
// The returned Pusher should satisfy content.Ingester and concurrent attempts
// to push the same blob using the Ingester API should result in ErrUnavailable.
func (o *OCI) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	if err := o.LoadIndex(); err != nil {
		return nil, err
	}
	// Split ref into base name and optional digest at the first '@'
	// (e.g. "repo/name@sha256:..."); the digest lets the pusher recognize
	// the root manifest so it can record it in the index.
	var baseRef, hash string
	parts := strings.SplitN(ref, "@", 2)
	baseRef = parts[0]
	if len(parts) > 1 {
		hash = parts[1]
	}
	return &ociPusher{
		oci:    o,
		ref:    baseRef,
		digest: hash,
	}, nil
}
// Walk invokes fn for every (reference, descriptor) pair in the name map.
// All callback errors are collected and returned joined as a single error;
// a failing callback does not stop the walk.
func (o *OCI) Walk(fn func(reference string, desc ocispec.Descriptor) error) error {
	if err := o.LoadIndex(); err != nil {
		return err
	}
	var errst []string
	o.nameMap.Range(func(key, value interface{}) bool {
		if err := fn(key.(string), value.(ocispec.Descriptor)); err != nil {
			errst = append(errst, err.Error())
		}
		return true
	})
	if len(errst) > 0 {
		// BUG FIX: fmt.Errorf was called with a non-constant format string
		// (go vet printf); a '%' in a callback error would be misparsed.
		return fmt.Errorf("%s", strings.Join(errst, "; "))
	}
	return nil
}
// blobReaderAt opens the blob file for desc at blobs/<alg>/<hex> under the
// layout root (creating the algorithm directory if needed).
func (o *OCI) blobReaderAt(desc ocispec.Descriptor) (*os.File, error) {
	blobPath, err := o.ensureBlob(desc.Digest.Algorithm().String(), desc.Digest.Hex())
	if err != nil {
		return nil, err
	}
	return os.Open(blobPath)
}
// manifestBlobReaderAt opens the blob file backing the manifest's config
// descriptor at blobs/<alg>/<hex> under the layout root.
func (o *OCI) manifestBlobReaderAt(manifest ocispec.Manifest) (*os.File, error) {
	// Algorithm().String() already yields a string; the original's extra
	// string(...) conversion was redundant.
	blobPath, err := o.ensureBlob(manifest.Config.Digest.Algorithm().String(), manifest.Config.Digest.Hex())
	if err != nil {
		return nil, err
	}
	return os.Open(blobPath)
}
// blobWriterAt opens (creating if necessary) the blob file for desc at
// blobs/<alg>/<hex> for writing.
func (o *OCI) blobWriterAt(desc ocispec.Descriptor) (*os.File, error) {
	blobPath, err := o.ensureBlob(desc.Digest.Algorithm().String(), desc.Digest.Hex())
	if err != nil {
		return nil, err
	}
	return os.OpenFile(blobPath, os.O_WRONLY|os.O_CREATE, 0644)
}
// ensureBlob makes sure the blobs/<alg> directory exists under the layout
// root and returns the full path for the blob with the given hex digest.
func (o *OCI) ensureBlob(alg string, hex string) (string, error) {
	dir := o.path("blobs", alg)
	// os.MkdirAll returns nil when the directory already exists, so the
	// original's !os.IsExist(err) guard was dead code.
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return "", err
	}
	return filepath.Join(dir, hex), nil
}
// path joins the given elements onto the layout root.
func (o *OCI) path(elem ...string) string {
	// o.root is already a string; the original's string(o.root) conversion
	// was redundant.
	complete := []string{o.root}
	return filepath.Join(append(complete, elem...)...)
}
// ociPusher writes pushed content into an OCI layout's blob store.
// ref/digest identify the root manifest so Push can record it in the index.
type ociPusher struct {
	oci    *OCI
	ref    string
	digest string
}
// Push returns a content writer for the given resource identified
// by the descriptor.
func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Writer, error) {
	switch d.MediaType {
	case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex, consts.DockerManifestSchema2, consts.DockerManifestListSchema2:
		// if the hash of the content matches that which was provided as the hash for the root, mark it
		if p.digest != "" && p.digest == d.Digest.String() {
			if err := p.oci.LoadIndex(); err != nil {
				return nil, err
			}
			// Record the root manifest under its base ref and persist.
			p.oci.nameMap.Store(p.ref, d)
			if err := p.oci.SaveIndex(); err != nil {
				return nil, err
			}
		}
	}
	blobPath, err := p.oci.ensureBlob(d.Digest.Algorithm().String(), d.Digest.Hex())
	if err != nil {
		return nil, err
	}
	if _, err := os.Stat(blobPath); err == nil {
		// file already exists, discard (but validate digest)
		return content.NewIoContentWriter(io.Discard, content.WithOutputHash(d.Digest)), nil
	}
	f, err := os.Create(blobPath)
	if err != nil {
		return nil, err
	}
	// Validate both the incoming and written bytes against the digest.
	w := content.NewIoContentWriter(f, content.WithInputHash(d.Digest), content.WithOutputHash(d.Digest))
	return w, nil
}

280
pkg/cosign/cosign.go Normal file
View File

@@ -0,0 +1,280 @@
package cosign
import (
"bufio"
"context"
"embed"
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
"oras.land/oras-go/pkg/content"
"github.com/rancherfederal/hauler/pkg/artifacts/image"
"github.com/rancherfederal/hauler/pkg/log"
"github.com/rancherfederal/hauler/pkg/store"
)
// Retry policy for operations that shell out to the cosign binary.
const maxRetries = 3
const retryDelay = time.Second * 5
// VerifySignature verifies the digital signature of an artifact using the
// cosign CLI with the provided public key, retrying on failure.
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, ref string) error {
	operation := func() error {
		cosignBinaryPath, err := getCosignPath()
		if err != nil {
			return err
		}
		// --insecure-ignore-tlog skips the transparency-log lookup
		// (air-gapped / offline verification).
		cmd := exec.Command(cosignBinaryPath, "verify", "--insecure-ignore-tlog", "--key", keyPath, ref)
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("error verifying signature: %v, output: %s", err, output)
		}
		return nil
	}
	return RetryOperation(ctx, operation)
}
// SaveImage saves image and any signatures/attestations to the store by
// shelling out to `cosign save`, retrying on failure. For multi-arch images
// a non-empty platform restricts what is saved.
func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string) error {
	l := log.FromContext(ctx)
	operation := func() error {
		cosignBinaryPath, err := getCosignPath()
		if err != nil {
			return err
		}
		// check to see if the image is multi-arch
		isMultiArch, err := image.IsMultiArchImage(ref)
		if err != nil {
			return err
		}
		l.Debugf("multi-arch image: %v", isMultiArch)
		cmd := exec.Command(cosignBinaryPath, "save", ref, "--dir", s.Root)
		// Conditionally add platform.
		if platform != "" && isMultiArch {
			l.Debugf("platform for image [%s]", platform)
			cmd.Args = append(cmd.Args, "--platform", platform)
		}
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return err
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}
		// start the command after having set up the pipes
		if err := cmd.Start(); err != nil {
			return err
		}
		// stream cosign's stdout into the debug log line by line
		output := bufio.NewScanner(stdout)
		for output.Scan() {
			// BUG FIX: pass an explicit format string so '%' in cosign
			// output is not misparsed as a verb (go vet printf).
			l.Debugf("%s", output.Text())
		}
		if err := output.Err(); err != nil {
			// Reap the child; the scanner error is the one to surface.
			_ = cmd.Wait()
			return err
		}
		// stream cosign's stderr into the error log line by line
		errScanner := bufio.NewScanner(stderr)
		for errScanner.Scan() {
			l.Errorf("%s", errScanner.Text())
		}
		if err := errScanner.Err(); err != nil {
			_ = cmd.Wait()
			return err
		}
		// Wait for the command to finish and surface its exit status.
		return cmd.Wait()
	}
	return RetryOperation(ctx, operation)
}
// LoadImages loads the store's contents to a remote registry by shelling
// out to `cosign load`, honoring the insecure/plain-HTTP registry options.
func LoadImages(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions) error {
	l := log.FromContext(ctx)
	cosignBinaryPath, err := getCosignPath()
	if err != nil {
		return err
	}
	cmd := exec.Command(cosignBinaryPath, "load", "--registry", registry, "--dir", s.Root)
	// Conditionally add extra registry flags.
	if ropts.Insecure {
		cmd.Args = append(cmd.Args, "--allow-insecure-registry=true")
	}
	if ropts.PlainHTTP {
		cmd.Args = append(cmd.Args, "--allow-http-registry=true")
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	// start the command after having set up the pipes
	if err := cmd.Start(); err != nil {
		return err
	}
	// stream cosign's stdout into the info log line by line
	output := bufio.NewScanner(stdout)
	for output.Scan() {
		// BUG FIX: explicit format string so '%' in cosign output is not
		// misparsed as a formatting verb (go vet printf).
		l.Infof("%s", output.Text())
	}
	if err := output.Err(); err != nil {
		// Reap the child; the scanner error is the one to surface.
		_ = cmd.Wait()
		return err
	}
	// stream cosign's stderr into the error log line by line
	errScanner := bufio.NewScanner(stderr)
	for errScanner.Scan() {
		l.Errorf("%s", errScanner.Text())
	}
	if err := errScanner.Err(); err != nil {
		_ = cmd.Wait()
		return err
	}
	// Wait for the command to finish and surface its exit status.
	return cmd.Wait()
}
// RegistryLogin performs a cosign login against the given registry using
// the provided credentials.
//
// NOTE(review): the password is passed as a command-line argument (-p) and
// may be visible in the process table; confirm whether cosign's stdin-based
// password input should be used instead.
func RegistryLogin(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions) error {
	log := log.FromContext(ctx)
	cosignBinaryPath, err := getCosignPath()
	if err != nil {
		return err
	}
	cmd := exec.Command(cosignBinaryPath, "login", registry, "-u", ropts.Username, "-p", ropts.Password)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error logging into registry: %v, output: %s", err, output)
	}
	// BUG FIX: explicit format string so '%' in registry output is not
	// misparsed as a formatting verb (go vet printf).
	log.Infof("%s", strings.Trim(string(output), "\n"))
	return nil
}
// RetryOperation runs operation up to maxRetries times, waiting retryDelay
// between attempts, and returns the last failure as an error. It now honors
// ctx cancellation: a cancelled context aborts immediately (including
// during the inter-attempt delay) and returns the context's error.
func RetryOperation(ctx context.Context, operation func() error) error {
	l := log.FromContext(ctx)
	for attempt := 1; attempt <= maxRetries; attempt++ {
		// BUG FIX: ctx was previously accepted but never consulted.
		if err := ctx.Err(); err != nil {
			return err
		}
		err := operation()
		if err == nil {
			// If the operation succeeds, return nil (no error).
			return nil
		}
		// Log the error for the current attempt.
		l.Errorf("error (attempt %d/%d): %v", attempt, maxRetries, err)
		// If this is not the last attempt, wait before retrying — but bail
		// out early if the context is cancelled while waiting.
		if attempt < maxRetries {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(retryDelay):
			}
		}
	}
	// If all attempts fail, return an error.
	return fmt.Errorf("operation failed after %d attempts", maxRetries)
}
// EnsureBinaryExists extracts the embedded cosign binary matching the
// current OS/architecture from bin and writes it, executable, to the hauler
// home directory.
func EnsureBinaryExists(ctx context.Context, bin embed.FS) error {
	// Resolve the destination path (~/.hauler/cosign[.exe]).
	binaryPath, err := getCosignPath()
	if err != nil {
		return fmt.Errorf("error: %v", err)
	}
	// Pick the embedded binary for this OS/arch.
	arch := runtime.GOARCH
	rOS := runtime.GOOS
	binaryName := fmt.Sprintf("cosign-%s-%s", rOS, arch)
	if rOS == "windows" {
		binaryName = fmt.Sprintf("cosign-%s-%s.exe", rOS, arch)
	}
	f, err := bin.ReadFile(fmt.Sprintf("binaries/%s", binaryName))
	if err != nil {
		return fmt.Errorf("error: %v", err)
	}
	// Write it out with the executable bit set.
	if err := os.WriteFile(binaryPath, f, 0755); err != nil {
		return fmt.Errorf("error: %v", err)
	}
	return nil
}
// getCosignPath returns the expected path of the cosign binary inside the
// user's ~/.hauler directory, creating that directory if it is missing.
func getCosignPath() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", fmt.Errorf("error: %v", err)
	}
	// ~/.hauler holds hauler's extracted tooling.
	haulerDir := filepath.Join(usr.HomeDir, ".hauler")
	if _, err := os.Stat(haulerDir); os.IsNotExist(err) {
		if err := os.MkdirAll(haulerDir, 0755); err != nil {
			return "", fmt.Errorf("error creating .hauler directory: %v", err)
		}
	}
	// Windows needs the .exe suffix; everything else is bare "cosign".
	binaryName := "cosign"
	if runtime.GOOS == "windows" {
		binaryName = "cosign.exe"
	}
	return filepath.Join(haulerDir, binaryName), nil
}

View File

@@ -1,49 +0,0 @@
package driver
import (
"context"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"io"
"sigs.k8s.io/cli-utils/pkg/object"
)
// Driver abstracts a local Kubernetes distribution (e.g. k3s) that hauler
// can configure, start, and inspect.
type Driver interface {
	Name() string
	//TODO: Really want this to just return a usable client
	KubeConfigPath() string
	Images(ctx context.Context) (map[name.Reference]v1.Image, error)
	Binary() (io.ReadCloser, error)
	SystemObjects() []object.ObjMetadata
	Start(io.Writer) error
	DataPath(...string) string
	WriteConfig() error
}
// NewDriver will return a new concrete Driver type given a kind.
//
// Only k3s is implemented: anything other than "rke2" falls through to a
// k3s driver with default data-dir/kubeconfig paths, while "rke2" currently
// returns a nil Driver (TODO upstream).
func NewDriver(driver v1alpha1.Driver) (d Driver) {
	switch driver.Type {
	case "rke2":
		// TODO
	default:
		d = K3s{
			Version: driver.Version,
			Config: K3sConfig{
				DataDir:        "/var/lib/rancher/k3s",
				KubeConfig:     "/etc/rancher/k3s/k3s.yaml",
				KubeConfigMode: "0644",
				Disable:        nil,
			},
		}
	}
	return
}

View File

@@ -1,872 +0,0 @@
#!/bin/sh
set -e
set -o noglob
# Usage:
# curl ... | ENV_VAR=... sh -
# or
# ENV_VAR=... ./install.sh
#
# Example:
# Installing a server without traefik:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -
# Installing an agent to point at a server:
# curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -
#
# Environment variables:
# - K3S_*
# Environment variables which begin with K3S_ will be preserved for the
# systemd service to use. Setting K3S_URL without explicitly setting
# a systemd exec command will default the command to "agent", and we
# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set.
#
# - INSTALL_K3S_SKIP_DOWNLOAD
# If set to true will not download k3s hash or binary.
#
# - INSTALL_K3S_FORCE_RESTART
# If set to true will always restart the K3s service
#
# - INSTALL_K3S_SYMLINK
# If set to 'skip' will not create symlinks, 'force' will overwrite,
# default will symlink if command does not exist in path.
#
# - INSTALL_K3S_SKIP_ENABLE
# If set to true will not enable or start k3s service.
#
# - INSTALL_K3S_SKIP_START
# If set to true will not start k3s service.
#
# - INSTALL_K3S_VERSION
# Version of k3s to download from github. Will attempt to download from the
# stable channel if not specified.
#
# - INSTALL_K3S_COMMIT
# Commit of k3s to download from temporary cloud storage.
# * (for developer & QA use)
#
# - INSTALL_K3S_BIN_DIR
# Directory to install k3s binary, links, and uninstall script to, or use
# /usr/local/bin as the default
#
# - INSTALL_K3S_BIN_DIR_READ_ONLY
# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces
# setting INSTALL_K3S_SKIP_DOWNLOAD=true
#
# - INSTALL_K3S_SYSTEMD_DIR
# Directory to install systemd service and environment files to, or use
# /etc/systemd/system as the default
#
# - INSTALL_K3S_EXEC or script arguments
# Command with flags to use for launching k3s in the systemd service, if
# the command is not specified will default to "agent" if K3S_URL is set
# or "server" if not. The final systemd command resolves to a combination
# of EXEC and script args ($@).
#
# The following commands result in the same behavior:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik
# curl ... | sh -s - server --disable=traefik
# curl ... | sh -s - --disable=traefik
#
# - INSTALL_K3S_NAME
# Name of systemd service to create, will default from the k3s exec command
# if not specified. If specified the name will be prefixed with 'k3s-'.
#
# - INSTALL_K3S_TYPE
# Type of systemd service to create, will default from the k3s exec command
# if not specified.
#
# - INSTALL_K3S_SELINUX_WARN
# If set to true will continue if k3s-selinux policy is not found.
#
# - INSTALL_K3S_SKIP_SELINUX_RPM
# If set to true will skip automatic installation of the k3s RPM.
#
# - INSTALL_K3S_CHANNEL_URL
# Channel URL for fetching k3s download URL.
# Defaults to 'https://update.k3s.io/v1-release/channels'.
#
# - INSTALL_K3S_CHANNEL
# Channel to use for fetching k3s download URL.
# Defaults to 'stable'.
# Release/download endpoints; DOWNLOADER is filled in by verify_downloader.
GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
DOWNLOADER=

# --- helper functions for logs ---
info()
{
    echo '[INFO] ' "$@"
}
warn()
{
    echo '[WARN] ' "$@" >&2
}
fatal()
{
    echo '[ERROR] ' "$@" >&2
    exit 1
}

# --- fatal if no systemd or openrc ---
# Sets HAS_OPENRC or HAS_SYSTEMD for later service-file decisions.
verify_system() {
    if [ -x /sbin/openrc-run ]; then
        HAS_OPENRC=true
        return
    fi
    if [ -d /run/systemd ]; then
        HAS_SYSTEMD=true
        return
    fi
    fatal 'Can not find systemd or openrc to use as a process supervisor for k3s'
}
# --- add quotes to command arguments ---
# Single-quotes each argument, escaping embedded single quotes.
quote() {
    for arg in "$@"; do
        printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/"
    done
}

# --- add indentation and trailing slash to quoted args ---
# Produces a backslash-continued, tab-indented argument list for unit files.
quote_indent() {
    printf ' \\\n'
    for arg in "$@"; do
        printf '\t%s \\\n' "$(quote "$arg")"
    done
}

# --- escape most punctuation characters, except quotes, forward slash, and space ---
escape() {
    printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;'
}

# --- escape double quotes ---
escape_dq() {
    printf '%s' "$@" | sed -e 's/"/\\"/g'
}

# --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise ---
verify_k3s_url() {
    case "${K3S_URL}" in
        "")
            ;;
        https://*)
            ;;
        *)
            fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})"
            ;;
    esac
}
# --- define needed environment variables ---
# Derives CMD_K3S, SYSTEM_NAME, SUDO, BIN_DIR, SYSTEMD_DIR, service/env file
# paths, and channel settings from the script arguments and INSTALL_K3S_*.
setup_env() {
    # --- use command args if passed or create default ---
    case "$1" in
        # --- if we only have flags discover if command should be server or agent ---
        (-*|"")
            if [ -z "${K3S_URL}" ]; then
                CMD_K3S=server
            else
                if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then
                    fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined."
                fi
                CMD_K3S=agent
            fi
        ;;
        # --- command is provided ---
        (*)
            CMD_K3S=$1
            shift
        ;;
    esac
    verify_k3s_url
    CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")"
    # --- use systemd name if defined or create default ---
    if [ -n "${INSTALL_K3S_NAME}" ]; then
        SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEM_NAME=k3s
        else
            SYSTEM_NAME=k3s-${CMD_K3S}
        fi
    fi
    # --- check for invalid characters in system name ---
    valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' )
    if [ "${SYSTEM_NAME}" != "${valid_chars}"  ]; then
        # Point at the offending characters with a caret line.
        invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g')
        fatal "Invalid characters for system name:
            ${SYSTEM_NAME}
            ${invalid_chars}"
    fi
    # --- use sudo if we are not already root ---
    SUDO=sudo
    if [ $(id -u) -eq 0 ]; then
        SUDO=
    fi
    # --- use systemd type if defined or create default ---
    if [ -n "${INSTALL_K3S_TYPE}" ]; then
        SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEMD_TYPE=notify
        else
            SYSTEMD_TYPE=exec
        fi
    fi
    # --- use binary install directory if defined or create default ---
    if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then
        BIN_DIR=${INSTALL_K3S_BIN_DIR}
    else
        # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists
        BIN_DIR=/usr/local/bin
        if ! $SUDO sh -c "touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test"; then
            if [ -d /opt/bin ]; then
                BIN_DIR=/opt/bin
            fi
        fi
    fi
    # --- use systemd directory if defined or create default ---
    if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then
        SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}"
    else
        SYSTEMD_DIR=/etc/systemd/system
    fi
    # --- set related files from system name ---
    SERVICE_K3S=${SYSTEM_NAME}.service
    UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
    KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}
    # --- use service or environment location depending on systemd/openrc ---
    if [ "${HAS_SYSTEMD}" = true ]; then
        FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
        FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env
    elif [ "${HAS_OPENRC}" = true ]; then
        $SUDO mkdir -p /etc/rancher/k3s
        FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}
        FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env
    fi
    # --- get hash of config & exec for currently installed k3s ---
    PRE_INSTALL_HASHES=$(get_installed_hashes)
    # --- if bin directory is read only skip download ---
    if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then
        INSTALL_K3S_SKIP_DOWNLOAD=true
    fi
    # --- setup channel values
    INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}
    INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}
}
# --- check if skip download environment variable set ---
can_skip_download() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then
        return 1
    fi
}

# --- verify an executable k3s binary is installed ---
verify_k3s_is_executable() {
    if [ ! -x ${BIN_DIR}/k3s ]; then
        fatal "Executable k3s binary not found at ${BIN_DIR}/k3s"
    fi
}

# --- set arch and suffix, fatal if architecture not supported ---
# SUFFIX selects the matching release asset name (empty for amd64).
setup_verify_arch() {
    if [ -z "$ARCH" ]; then
        ARCH=$(uname -m)
    fi
    case $ARCH in
        amd64)
            ARCH=amd64
            SUFFIX=
            ;;
        x86_64)
            ARCH=amd64
            SUFFIX=
            ;;
        arm64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        aarch64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        arm*)
            ARCH=arm
            SUFFIX=-${ARCH}hf
            ;;
        *)
            fatal "Unsupported architecture $ARCH"
    esac
}

# --- verify existence of network downloader executable ---
verify_downloader() {
    # Return failure if it doesn't exist or is no executable
    [ -x "$(command -v $1)" ] || return 1
    # Set verified executable as our downloader program and return success
    DOWNLOADER=$1
    return 0
}

# --- create temporary directory and cleanup when done ---
# The INT/EXIT trap removes TMP_DIR and preserves the original exit code.
setup_tmp() {
    TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
    TMP_HASH=${TMP_DIR}/k3s.hash
    TMP_BIN=${TMP_DIR}/k3s.bin
    cleanup() {
        code=$?
        set +e
        trap - EXIT
        rm -rf ${TMP_DIR}
        exit $code
    }
    trap cleanup INT EXIT
}
# --- use desired k3s version if defined or find version from channel ---
# Resolution order: explicit commit, explicit version, then the channel URL
# (whose redirect target encodes the release tag).
get_release_version() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
    elif [ -n "${INSTALL_K3S_VERSION}" ]; then
        VERSION_K3S=${INSTALL_K3S_VERSION}
    else
        info "Finding release for channel ${INSTALL_K3S_CHANNEL}"
        version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}"
        case $DOWNLOADER in
            curl)
                VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')
                ;;
            wget)
                VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')
                ;;
            *)
                fatal "Incorrect downloader executable '$DOWNLOADER'"
                ;;
        esac
    fi
    info "Using ${VERSION_K3S} as release"
}

# --- download from github url ---
download() {
    [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'
    case $DOWNLOADER in
        curl)
            curl -o $1 -sfL $2
            ;;
        wget)
            wget -qO $1 $2
            ;;
        *)
            fatal "Incorrect executable '$DOWNLOADER'"
            ;;
    esac
    # Abort if download command failed
    # NOTE(review): with `set -e` a failing curl/wget exits before this
    # check; it only fires if errexit is disabled — confirm still wanted.
    [ $? -eq 0 ] || fatal 'Download failed'
}

# --- download hash from github url ---
# Extracts the sha256 for this arch's asset into HASH_EXPECTED.
download_hash() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
    else
        HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt
    fi
    info "Downloading hash ${HASH_URL}"
    download ${TMP_HASH} ${HASH_URL}
    HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
    HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
}
# --- check hash against installed version ---
# Succeeds only when a k3s binary is already installed and its sha256
# equals HASH_EXPECTED.
installed_hash_matches() {
    # Nothing installed means nothing can match.
    [ -x ${BIN_DIR}/k3s ] || return 1
    HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s)
    # Strip everything after the first blank, keeping just the digest.
    HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*}
    [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]
}
# --- download binary from github url ---
download_binary() {
    # Default to the GitHub release asset; CI commit builds come from
    # temporary cloud storage instead.
    BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
    fi
    info "Downloading binary ${BIN_URL}"
    download ${TMP_BIN} ${BIN_URL}
}
# --- verify downloaded binary hash ---
# Aborts the install when the downloaded binary's sha256 differs from the
# manifest value captured by download_hash.
verify_binary() {
    info "Verifying binary download"
    HASH_BIN=$(sha256sum ${TMP_BIN})
    HASH_BIN=${HASH_BIN%%[[:blank:]]*}
    [ "${HASH_EXPECTED}" = "${HASH_BIN}" ] || fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}"
}
# --- setup permissions and move binary to system directory ---
# Makes the staged binary executable, hands ownership to root, then moves
# it into BIN_DIR.
setup_binary() {
chmod 755 ${TMP_BIN}
info "Installing k3s to ${BIN_DIR}/k3s"
$SUDO chown root:root ${TMP_BIN}
$SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s
}
# --- setup selinux policy ---
# Derives the RPM channel from the install channel, optionally installs the
# k3s-selinux RPM, then labels the binary as container_runtime_exec_t.
setup_selinux() {
case ${INSTALL_K3S_CHANNEL} in
*testing)
rpm_channel=testing
;;
*latest)
rpm_channel=latest
;;
*)
rpm_channel=stable
;;
esac
rpm_site="rpm.rancher.io"
if [ "${rpm_channel}" = "testing" ]; then
rpm_site="rpm-testing.rancher.io"
fi
policy_hint="please install:
yum install -y container-selinux selinux-policy-base
yum install -y https://${rpm_site}/k3s/${rpm_channel}/common/centos/7/noarch/k3s-selinux-0.2-1.el7_8.noarch.rpm
"
# Downgrade policy failures to warnings when requested, or on Flatcar.
policy_error=fatal
if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || grep -q 'ID=flatcar' /etc/os-release; then
policy_error=warn
fi
if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download; then
info "Skipping installation of SELinux RPM"
else
install_selinux_rpm ${rpm_site} ${rpm_channel}
fi
# A failed chcon only matters when SELinux is actually enforcing.
if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then
if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then
$policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
fi
else
if [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
$policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
fi
fi
}
# --- if on an el7/el8 system, install k3s-selinux ---
# $1: rpm site host, $2: rpm channel. No-op on non-RHEL-family systems.
install_selinux_rpm() {
if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
# Reduce e.g. "7.9" to its major version "7".
maj_ver=$(echo "$dist_version" | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
# Globbing must be enabled briefly so the wildcard repo path expands.
set +o noglob
$SUDO rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
set -o noglob
if [ -r /etc/redhat-release ]; then
case ${maj_ver} in
7)
# el7 carries container-selinux in the extras repo; make sure it is enabled.
$SUDO yum -y install yum-utils
$SUDO yum-config-manager --enable rhel-7-server-extras-rpms
;;
8)
:
;;
*)
# Unknown major version: skip the RPM install entirely.
return
;;
esac
fi
$SUDO tee /etc/yum.repos.d/rancher-k3s-common.repo >/dev/null << EOF
[rancher-k3s-common-${2}]
name=Rancher K3s Common (${2})
baseurl=https://${1}/k3s/${2}/common/centos/${maj_ver}/noarch
enabled=1
gpgcheck=1
gpgkey=https://${1}/public.key
EOF
$SUDO yum -y install "k3s-selinux"
fi
return
}
# --- download and verify k3s ---
# Orchestrates the fetch: resolves arch + version, downloads the hash
# manifest, and only downloads/installs the binary when the installed copy
# does not already match the expected hash.
download_and_verify() {
    if can_skip_download; then
        info 'Skipping k3s download and verify'
        verify_k3s_is_executable
        return
    fi
    setup_verify_arch
    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
    setup_tmp
    get_release_version
    download_hash
    if installed_hash_matches; then
        # fix: message previously read "Skipping binary downloaded"
        info 'Skipping binary download, installed k3s matches hash'
        return
    fi
    download_binary
    verify_binary
    setup_binary
}
# --- add additional utility links ---
# Symlinks kubectl/crictl/ctr to the multi-call k3s binary unless the
# command already exists in PATH; INSTALL_K3S_SYMLINK=force always links,
# INSTALL_K3S_SYMLINK=skip never does.
create_symlinks() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
[ "${INSTALL_K3S_SYMLINK}" = skip ] && return
for cmd in kubectl crictl ctr; do
if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
which_cmd=$(command -v ${cmd} 2>/dev/null || true)
if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
$SUDO ln -sf k3s ${BIN_DIR}/${cmd}
else
info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
fi
else
info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists"
fi
done
}
# --- create killall script ---
# Writes ${KILLALL_K3S_SH}, a standalone script that stops k3s services,
# kills the containerd-shim process trees, unmounts k3s/kubelet mounts and
# tears down CNI network state. The heredoc delimiter is quoted (\EOF) so
# the script body is written verbatim with no expansion at install time.
create_killall() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
info "Creating killall script ${KILLALL_K3S_SH}"
$SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
#!/bin/sh
[ $(id -u) -eq 0 ] || exec sudo $0 $@
for bin in /var/lib/rancher/k3s/data/**/bin/; do
[ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done
set -x
for service in /etc/systemd/system/k3s*.service; do
[ -s $service ] && systemctl stop $(basename $service)
done
for service in /etc/init.d/k3s*; do
[ -x $service ] && $service stop
done
pschildren() {
ps -e -o ppid= -o pid= | \
sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
grep -w "^$1" | \
cut -f2
}
pstree() {
for pid in $@; do
echo $pid
for child in $(pschildren $pid); do
pstree $child
done
done
}
killtree() {
kill -9 $(
{ set +x; } 2>/dev/null;
pstree $@;
set -x;
) 2>/dev/null
}
getshims() {
ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}
killtree $({ set +x; } 2>/dev/null; getshims; set -x)
do_unmount_and_remove() {
awk -v path="$1" '$2 ~ ("^" path) { print $2 }' /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
}
do_unmount_and_remove '/run/k3s'
do_unmount_and_remove '/var/lib/rancher/k3s'
do_unmount_and_remove '/var/lib/kubelet/pods'
do_unmount_and_remove '/var/lib/kubelet/plugins'
do_unmount_and_remove '/run/netns/cni-'
# Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete
# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
iface=${iface%%@*}
[ -z "$iface" ] || ip link delete $iface
done
ip link delete cni0
ip link delete flannel.1
rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
EOF
$SUDO chmod 755 ${KILLALL_K3S_SH}
$SUDO chown root:root ${KILLALL_K3S_SH}
}
# --- create uninstall script ---
# Writes ${UNINSTALL_K3S_SH}. The heredoc delimiter is unquoted, so
# ${...} variables are expanded NOW (install time) and baked into the
# script; \$-escaped expressions are deferred until the script runs.
create_uninstall() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
info "Creating uninstall script ${UNINSTALL_K3S_SH}"
$SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF
#!/bin/sh
set -x
[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@
${KILLALL_K3S_SH}
if command -v systemctl; then
systemctl disable ${SYSTEM_NAME}
systemctl reset-failed ${SYSTEM_NAME}
systemctl daemon-reload
fi
if command -v rc-update; then
rc-update delete ${SYSTEM_NAME} default
fi
rm -f ${FILE_K3S_SERVICE}
rm -f ${FILE_K3S_ENV}
remove_uninstall() {
rm -f ${UNINSTALL_K3S_SH}
}
trap remove_uninstall EXIT
if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then
set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x
exit
fi
for cmd in kubectl crictl ctr; do
if [ -L ${BIN_DIR}/\$cmd ]; then
rm -f ${BIN_DIR}/\$cmd
fi
done
rm -rf /etc/rancher/k3s
rm -rf /run/k3s
rm -rf /run/flannel
rm -rf /var/lib/rancher/k3s
rm -rf /var/lib/kubelet
rm -f ${BIN_DIR}/k3s
rm -f ${KILLALL_K3S_SH}
if type yum >/dev/null 2>&1; then
yum remove -y k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
fi
EOF
$SUDO chmod 755 ${UNINSTALL_K3S_SH}
$SUDO chown root:root ${UNINSTALL_K3S_SH}
}
# --- disable current service if loaded --
# Best-effort cleanup of any previously installed unit; every step ignores
# failure so a fresh install is unaffected.
systemd_disable() {
$SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true
$SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true
$SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true
}
# --- capture current env and create file containing k3s_ variables ---
# Captures all K3S_* variables plus any proxy variables into the service
# environment file. Mode 0600 is applied before content is written since
# the captured variables may contain secrets.
create_env_file() {
info "env: Creating environment file ${FILE_K3S_ENV}"
$SUDO touch ${FILE_K3S_ENV}
$SUDO chmod 0600 ${FILE_K3S_ENV}
env | grep '^K3S_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null
env | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null
}
# --- write systemd service file ---
# The heredoc delimiter is unquoted: SYSTEMD_TYPE, FILE_K3S_ENV, BIN_DIR
# and CMD_K3S_EXEC are expanded at install time into the unit file.
create_systemd_service_file() {
info "systemd: Creating service file ${FILE_K3S_SERVICE}"
$SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target
[Install]
WantedBy=multi-user.target
[Service]
Type=${SYSTEMD_TYPE}
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-${FILE_K3S_ENV}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\
${CMD_K3S_EXEC}
EOF
}
# --- write openrc service file ---
# Writes an openrc run script (supervise-daemon based) plus a logrotate
# configuration for its log file. The heredocs are unquoted, so install-time
# variables are expanded into the generated files.
create_openrc_service_file() {
LOG_FILE=/var/log/${SYSTEM_NAME}.log
info "openrc: Creating service file ${FILE_K3S_SERVICE}"
$SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
#!/sbin/openrc-run
depend() {
after network-online
want cgroups
}
start_pre() {
rm -f /tmp/k3s.*
}
supervisor=supervise-daemon
name=${SYSTEM_NAME}
command="${BIN_DIR}/k3s"
command_args="$(escape_dq "${CMD_K3S_EXEC}")
>>${LOG_FILE} 2>&1"
output_log=${LOG_FILE}
error_log=${LOG_FILE}
pidfile="/var/run/${SYSTEM_NAME}.pid"
respawn_delay=5
respawn_max=0
set -o allexport
if [ -f /etc/environment ]; then source /etc/environment; fi
if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi
set +o allexport
EOF
$SUDO chmod 0755 ${FILE_K3S_SERVICE}
$SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF
${LOG_FILE} {
missingok
notifempty
copytruncate
}
EOF
}
# --- write systemd or openrc service file ---
# Dispatches to the writer matching the detected init system.
create_service_file() {
    if [ "${HAS_SYSTEMD}" = true ]; then
        create_systemd_service_file
    fi
    if [ "${HAS_OPENRC}" = true ]; then
        create_openrc_service_file
    fi
    return 0
}
# --- get hashes of the current k3s bin and service files
# Used to detect whether anything changed between pre- and post-install;
# errors for missing files are folded into the output rather than failing.
get_installed_hashes() {
$SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
}
# --- enable and start systemd service ---
systemd_enable() {
info "systemd: Enabling ${SYSTEM_NAME} unit"
# Enabling by unit file path links the unit into systemd as well.
$SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null
$SUDO systemctl daemon-reload >/dev/null
}
# Restart (rather than start) so a replaced binary/config takes effect.
systemd_start() {
info "systemd: Starting ${SYSTEM_NAME}"
$SUDO systemctl restart ${SYSTEM_NAME}
}
# --- enable and start openrc service ---
openrc_enable() {
info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel"
$SUDO rc-update add ${SYSTEM_NAME} default >/dev/null
}
# Restart via the run script itself so an already-running service is replaced.
openrc_start() {
info "openrc: Starting ${SYSTEM_NAME}"
$SUDO ${FILE_K3S_SERVICE} restart
}
# --- startup systemd or openrc service ---
# Honors INSTALL_K3S_SKIP_ENABLE / INSTALL_K3S_SKIP_START and skips the
# (re)start when installed file hashes are unchanged, unless
# INSTALL_K3S_FORCE_RESTART=true.
service_enable_and_start() {
[ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return
[ "${HAS_SYSTEMD}" = true ] && systemd_enable
[ "${HAS_OPENRC}" = true ] && openrc_enable
[ "${INSTALL_K3S_SKIP_START}" = true ] && return
POST_INSTALL_HASHES=$(get_installed_hashes)
if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_K3S_FORCE_RESTART}" != true ]; then
info 'No change detected so skipping service start'
return
fi
[ "${HAS_SYSTEMD}" = true ] && systemd_start
[ "${HAS_OPENRC}" = true ] && openrc_start
return 0
}
# --- re-evaluate args to include env command ---
# Splices INSTALL_K3S_EXEC (word-split via the escape/quote helpers) in
# front of the original CLI arguments.
eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")
# --- run the install process --
{
verify_system
setup_env "$@"
download_and_verify
setup_selinux
create_symlinks
create_killall
create_uninstall
systemd_disable
create_env_file
create_service_file
service_enable_and_start
}

View File

@@ -1,507 +0,0 @@
#!/bin/sh
set -e
# DEBUG=1 traces every command for troubleshooting.
if [ "${DEBUG}" = 1 ]; then
set -x
fi
# Usage:
# curl ... | ENV_VAR=... sh -
# or
# ENV_VAR=... ./install.sh
#
# Environment variables:
#
# - INSTALL_RKE2_CHANNEL
# Channel to use for fetching rke2 download URL.
# Defaults to 'latest'.
#
# - INSTALL_RKE2_METHOD
# The installation method to use.
# Default is on RPM-based systems is "rpm", all else "tar".
#
# - INSTALL_RKE2_TYPE
# Type of rke2 service. Can be either "server" or "agent".
# Default is "server".
#
# - INSTALL_RKE2_EXEC
# This is an alias for INSTALL_RKE2_TYPE, included for compatibility with K3s.
# If both are set, INSTALL_RKE2_TYPE is preferred.
#
# - INSTALL_RKE2_VERSION
# Version of rke2 to download from github.
#
# - INSTALL_RKE2_RPM_RELEASE_VERSION
# Version of the rke2 RPM release to install.
# Format would be like "1.el7" or "2.el8"
#
# - INSTALL_RKE2_TAR_PREFIX
# Installation prefix when using the tar installation method.
# Default is /usr/local, unless /usr/local is read-only or has a dedicated mount point,
# in which case /opt/rke2 is used instead.
#
# - INSTALL_RKE2_COMMIT
# Commit of RKE2 to download from temporary cloud storage.
# If set, this forces INSTALL_RKE2_METHOD=tar.
# * (for developer & QA use)
#
# - INSTALL_RKE2_AGENT_IMAGES_DIR
# Installation path for airgap images when installing from CI commit
# Default is /var/lib/rancher/rke2/agent/images
#
# - INSTALL_RKE2_ARTIFACT_PATH
# If set, the install script will use the local path for sourcing the rke2.linux-$SUFFIX and sha256sum-$ARCH.txt files
# rather than downloading the files from the internet.
# Default is not set.
#
# info logs the given argument at info log level.
info() {
echo "[INFO] " "$@"
}
# warn logs the given argument at warn log level.
warn() {
echo "[WARN] " "$@" >&2
}
# fatal logs the given argument at fatal log level and exits with status 1.
# Once SUFFIX is known it also points the user at a manual download.
fatal() {
echo "[ERROR] " "$@" >&2
if [ -n "${SUFFIX}" ]; then
echo "[ALT] Please visit 'https://github.com/rancher/rke2/releases' directly and download the latest rke2.${SUFFIX}.tar.gz" >&2
fi
exit 1
}
# check_target_mountpoint return success if the target directory is on a dedicated mount point
check_target_mountpoint() {
mountpoint -q "${INSTALL_RKE2_TAR_PREFIX}"
}
# check_target_ro returns success if the target directory is read-only
check_target_ro() {
# Probe by creating (and removing) a scratch file; if that fails the
# target is treated as read-only. This is safe under 'set -e' because the
# function is only invoked inside an 'if' condition (see setup_env).
touch "${INSTALL_RKE2_TAR_PREFIX}"/.rke2-ro-test && rm -rf "${INSTALL_RKE2_TAR_PREFIX}"/.rke2-ro-test
test $? -ne 0
}
# setup_env defines needed environment variables.
setup_env() {
STORAGE_URL="https://storage.googleapis.com/rke2-ci-builds"
INSTALL_RKE2_GITHUB_URL="https://github.com/rancher/rke2"
DEFAULT_TAR_PREFIX="/usr/local"
# --- bail if we are not root ---
if [ ! $(id -u) -eq 0 ]; then
fatal "You need to be root to perform this install"
fi
# --- make sure install channel has a value
if [ -z "${INSTALL_RKE2_CHANNEL}" ]; then
INSTALL_RKE2_CHANNEL="stable"
fi
# --- make sure install type has a value
# INSTALL_RKE2_TYPE wins over its K3s-compat alias INSTALL_RKE2_EXEC.
if [ -z "${INSTALL_RKE2_TYPE}" ]; then
INSTALL_RKE2_TYPE="${INSTALL_RKE2_EXEC:-server}"
fi
# --- use yum install method if available by default
# Only when no artifact path, commit, or explicit method was given.
if [ -z "${INSTALL_RKE2_ARTIFACT_PATH}" ] && [ -z "${INSTALL_RKE2_COMMIT}" ] && [ -z "${INSTALL_RKE2_METHOD}" ] && command -v yum >/dev/null 2>&1; then
INSTALL_RKE2_METHOD="yum"
fi
# --- install tarball to /usr/local by default, except if /usr/local is on a separate partition or is read-only
# --- in which case we go into /opt/rke2.
if [ -z "${INSTALL_RKE2_TAR_PREFIX}" ]; then
INSTALL_RKE2_TAR_PREFIX=${DEFAULT_TAR_PREFIX}
if check_target_mountpoint || check_target_ro; then
INSTALL_RKE2_TAR_PREFIX="/opt/rke2"
warn "${DEFAULT_TAR_PREFIX} is read-only or a mount point; installing to ${INSTALL_RKE2_TAR_PREFIX}"
fi
fi
if [ -z "${INSTALL_RKE2_AGENT_IMAGES_DIR}" ]; then
INSTALL_RKE2_AGENT_IMAGES_DIR="/var/lib/rancher/rke2/agent/images"
fi
}
# check_method_conflict will exit with an error if the user attempts to install
# via tar method on a host with existing RPMs.
check_method_conflict() {
    case ${INSTALL_RKE2_METHOD} in
    yum | rpm | dnf)
        # RPM-based install is always compatible with existing RPMs.
        return
        ;;
    esac
    # A tar-style install over an RPM install would leave two copies around.
    if rpm -q rke2-common >/dev/null 2>&1; then
        fatal "Cannot perform ${INSTALL_RKE2_METHOD:-tar} install on host with existing RKE2 RPMs - please run rke2-uninstall.sh first"
    fi
}
# setup_arch sets ARCH and SUFFIX,
# fatal if architecture not supported.
setup_arch() {
    case ${ARCH:=$(uname -m)} in
    amd64 | x86_64)
        # Normalize both spellings to the Go-style arch name.
        ARCH=amd64
        SUFFIX=$(uname -s | tr '[:upper:]' '[:lower:]')-${ARCH}
        ;;
    *)
        fatal "unsupported architecture ${ARCH}"
        ;;
    esac
}
# verify_downloader verifies existence of
# network downloader executable.
verify_downloader() {
    cmd="$(command -v "${1}")"
    # Reject candidates that are missing or not executable.
    if [ -z "${cmd}" ] || [ ! -x "${cmd}" ]; then
        return 1
    fi
    # Set verified executable as our downloader program and return success
    DOWNLOADER=${cmd}
    return 0
}
# setup_tmp creates a temporary directory
# and cleans up when done.
# Defines scratch paths for the main tarball, the airgap image tarball and
# their checksum manifests; the trap preserves the original exit code.
setup_tmp() {
TMP_DIR=$(mktemp -d -t rke2-install.XXXXXXXXXX)
TMP_CHECKSUMS=${TMP_DIR}/rke2.checksums
TMP_TARBALL=${TMP_DIR}/rke2.tarball
TMP_AIRGAP_CHECKSUMS=${TMP_DIR}/rke2-images.checksums
TMP_AIRGAP_TARBALL=${TMP_DIR}/rke2-images.tarball
cleanup() {
code=$?
set +e
trap - EXIT
rm -rf "${TMP_DIR}"
exit $code
}
trap cleanup INT EXIT
}
# --- use desired rke2 version if defined or find version from channel ---
# Resolution order: INSTALL_RKE2_COMMIT, then INSTALL_RKE2_VERSION, then the
# release channel (the channel URL redirects to the latest release tag).
get_release_version() {
if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
version="commit ${INSTALL_RKE2_COMMIT}"
elif [ -n "${INSTALL_RKE2_VERSION}" ]; then
version=${INSTALL_RKE2_VERSION}
else
info "finding release for channel ${INSTALL_RKE2_CHANNEL}"
INSTALL_RKE2_CHANNEL_URL=${INSTALL_RKE2_CHANNEL_URL:-'https://update.rke2.io/v1-release/channels'}
version_url="${INSTALL_RKE2_CHANNEL_URL}/${INSTALL_RKE2_CHANNEL}"
case ${DOWNLOADER} in
*curl)
# The tag is the last path segment of the post-redirect URL.
version=$(${DOWNLOADER} -w "%{url_effective}" -L -s -S "${version_url}" -o /dev/null | sed -e 's|.*/||')
;;
*wget)
# wget prints headers to stderr (-S); parse the Location header.
version=$(${DOWNLOADER} -SqO /dev/null "${version_url}" 2>&1 | grep -i Location | sed -e 's|.*/||')
;;
*)
fatal "Unsupported downloader executable '${DOWNLOADER}'"
;;
esac
INSTALL_RKE2_VERSION="${version}"
fi
}
# check_download performs a HEAD request to see if a file exists at a given url
check_download() {
case ${DOWNLOADER} in
*curl)
# -f makes curl fail on HTTP errors so a missing file returns nonzero
curl -o "/dev/null" -fsLI -X HEAD "$1"
;;
*wget)
# --spider checks existence without downloading the body
wget -q --spider "$1"
;;
*)
fatal "downloader executable not supported: '${DOWNLOADER}'"
;;
esac
}
# download downloads a file from a url using either curl or wget
# $1: destination file, $2: source url.
download() {
if [ $# -ne 2 ]; then
fatal "download needs exactly 2 arguments"
fi
case ${DOWNLOADER} in
*curl)
curl -o "$1" -fsSL "$2"
;;
*wget)
wget -qO "$1" "$2"
;;
*)
fatal "downloader executable not supported: '${DOWNLOADER}'"
;;
esac
# Abort if download command failed
# NOTE(review): the script runs under 'set -e', so a failing curl/wget
# already exits before this check; the fatal below is a safety net.
if [ $? -ne 0 ]; then
fatal "download failed"
fi
}
# download_checksums downloads hash from github url.
# Sets CHECKSUM_EXPECTED to the digest for the rke2 tarball of this
# arch/os, extracted from the release (or CI commit) manifest.
download_checksums() {
if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
CHECKSUMS_URL=${STORAGE_URL}/rke2.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz.sha256sum
else
CHECKSUMS_URL=${INSTALL_RKE2_GITHUB_URL}/releases/download/${INSTALL_RKE2_VERSION}/sha256sum-${ARCH}.txt
fi
info "downloading checksums at ${CHECKSUMS_URL}"
download "${TMP_CHECKSUMS}" "${CHECKSUMS_URL}"
CHECKSUM_EXPECTED=$(grep "rke2.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
}
# download_tarball downloads binary from github url.
download_tarball() {
    # Default to the GitHub release asset, overridden for CI commit builds.
    TARBALL_URL=${INSTALL_RKE2_GITHUB_URL}/releases/download/${INSTALL_RKE2_VERSION}/rke2.${SUFFIX}.tar.gz
    if [ -n "${INSTALL_RKE2_COMMIT}" ]; then
        TARBALL_URL=${STORAGE_URL}/rke2.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz
    fi
    info "downloading tarball at ${TARBALL_URL}"
    download "${TMP_TARBALL}" "${TARBALL_URL}"
}
# stage_local_checksums stages the local checksum hash for validation.
stage_local_checksums() {
info "staging local checksums from ${INSTALL_RKE2_ARTIFACT_PATH}/sha256sum-${ARCH}.txt"
cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/sha256sum-${ARCH}.txt" "${TMP_CHECKSUMS}"
CHECKSUM_EXPECTED=$(grep "rke2.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
# Also pick up the airgap image checksum when a local image tarball is
# present, preferring the zst artifact over gz.
if [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" ]; then
AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar.zst" "${TMP_CHECKSUMS}" | awk '{print $1}')
elif [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" ]; then
AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar.gz" "${TMP_CHECKSUMS}" | awk '{print $1}')
fi
}
# stage_local_tarball stages the local tarball.
# Copies the user-provided artifact into the scratch dir so it flows
# through the same verify/unpack path as a downloaded one.
stage_local_tarball() {
info "staging tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2.${SUFFIX}.tar.gz"
cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2.${SUFFIX}.tar.gz" "${TMP_TARBALL}"
}
# stage_local_airgap_tarball stages the local checksum hash for validation.
# Prefers the zst artifact; records the format so install_airgap_tarball
# knows whether decompression is required.
stage_local_airgap_tarball() {
if [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" ]; then
info "staging zst airgap image tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst"
cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.zst" "${TMP_AIRGAP_TARBALL}"
AIRGAP_TARBALL_FORMAT=zst
elif [ -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" ]; then
info "staging gzip airgap image tarball from ${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz"
cp -f "${INSTALL_RKE2_ARTIFACT_PATH}/rke2-images.${SUFFIX}.tar.gz" "${TMP_AIRGAP_TARBALL}"
AIRGAP_TARBALL_FORMAT=gz
fi
}
# verify_tarball verifies the downloaded installer checksum.
verify_tarball() {
    info "verifying tarball"
    CHECKSUM_ACTUAL=$(sha256sum "${TMP_TARBALL}" | awk '{print $1}')
    [ "${CHECKSUM_EXPECTED}" = "${CHECKSUM_ACTUAL}" ] || fatal "download sha256 does not match ${CHECKSUM_EXPECTED}, got ${CHECKSUM_ACTUAL}"
}
# unpack_tarball extracts the tarball, correcting paths and moving systemd units as necessary
unpack_tarball() {
info "unpacking tarball file to ${INSTALL_RKE2_TAR_PREFIX}"
mkdir -p ${INSTALL_RKE2_TAR_PREFIX}
tar xzf "${TMP_TARBALL}" -C "${INSTALL_RKE2_TAR_PREFIX}"
# When relocated away from the default prefix the unit files and
# uninstall script still reference /usr/local; rewrite them in place.
if [ "${INSTALL_RKE2_TAR_PREFIX}" != "${DEFAULT_TAR_PREFIX}" ]; then
info "updating tarball contents to reflect install path"
sed -i "s|${DEFAULT_TAR_PREFIX}|${INSTALL_RKE2_TAR_PREFIX}|" ${INSTALL_RKE2_TAR_PREFIX}/lib/systemd/system/rke2-*.service ${INSTALL_RKE2_TAR_PREFIX}/bin/rke2-uninstall.sh
info "moving systemd units to /etc/systemd/system"
mv -f ${INSTALL_RKE2_TAR_PREFIX}/lib/systemd/system/rke2-*.service /etc/systemd/system/
info "install complete; you may want to run: export PATH=\$PATH:${INSTALL_RKE2_TAR_PREFIX}/bin"
fi
}
# download_airgap_checksums downloads the checksum file for the airgap image tarball
# and prepares the checksum value for later validation.
# Only CI commit builds fetch airgap images here; releases return early.
download_airgap_checksums() {
if [ -z "${INSTALL_RKE2_COMMIT}" ]; then
return
fi
AIRGAP_CHECKSUMS_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.zst.sha256sum
# try for zst first; if that fails use gz for older release branches
if ! check_download "${AIRGAP_CHECKSUMS_URL}"; then
AIRGAP_CHECKSUMS_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz.sha256sum
fi
info "downloading airgap checksums at ${AIRGAP_CHECKSUMS_URL}"
download "${TMP_AIRGAP_CHECKSUMS}" "${AIRGAP_CHECKSUMS_URL}"
AIRGAP_CHECKSUM_EXPECTED=$(grep "rke2-images.${SUFFIX}.tar" "${TMP_AIRGAP_CHECKSUMS}" | awk '{print $1}')
}
# download_airgap_tarball downloads the airgap image tarball.
# Only CI commit builds fetch airgap images here; releases return early.
download_airgap_tarball() {
if [ -z "${INSTALL_RKE2_COMMIT}" ]; then
return
fi
AIRGAP_TARBALL_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.zst
# try for zst first; if that fails use gz for older release branches
if ! check_download "${AIRGAP_TARBALL_URL}"; then
AIRGAP_TARBALL_URL=${STORAGE_URL}/rke2-images.${SUFFIX}-${INSTALL_RKE2_COMMIT}.tar.gz
fi
info "downloading airgap tarball at ${AIRGAP_TARBALL_URL}"
download "${TMP_AIRGAP_TARBALL}" "${AIRGAP_TARBALL_URL}"
}
# verify_airgap_tarball compares the airgap image tarball checksum to the value
# calculated by CI when the file was uploaded.
verify_airgap_tarball() {
    # No expected checksum means no airgap artifact is being installed.
    if [ -n "${AIRGAP_CHECKSUM_EXPECTED}" ]; then
        info "verifying airgap tarball"
        AIRGAP_CHECKSUM_ACTUAL=$(sha256sum "${TMP_AIRGAP_TARBALL}" | awk '{print $1}')
        if [ "${AIRGAP_CHECKSUM_EXPECTED}" != "${AIRGAP_CHECKSUM_ACTUAL}" ]; then
            fatal "download sha256 does not match ${AIRGAP_CHECKSUM_EXPECTED}, got ${AIRGAP_CHECKSUM_ACTUAL}"
        fi
    fi
}
# install_airgap_tarball moves the airgap image tarball into place.
install_airgap_tarball() {
if [ -z "${AIRGAP_CHECKSUM_EXPECTED}" ]; then
return
fi
mkdir -p "${INSTALL_RKE2_AGENT_IMAGES_DIR}"
# releases that provide zst artifacts can read from the compressed archive; older releases
# that produce only gzip artifacts need to have the tarball decompressed ahead of time
if grep -qF '.tar.zst' "${TMP_AIRGAP_CHECKSUMS}" || [ "${AIRGAP_TARBALL_FORMAT}" = "zst" ]; then
info "installing airgap tarball to ${INSTALL_RKE2_AGENT_IMAGES_DIR}"
mv -f "${TMP_AIRGAP_TARBALL}" "${INSTALL_RKE2_AGENT_IMAGES_DIR}/rke2-images.${SUFFIX}.tar.zst"
else
info "decompressing airgap tarball to ${INSTALL_RKE2_AGENT_IMAGES_DIR}"
gzip -dc "${TMP_AIRGAP_TARBALL}" > "${INSTALL_RKE2_AGENT_IMAGES_DIR}/rke2-images.${SUFFIX}.tar"
fi
}
# do_install_rpm builds a yum repo config from the channel and version to be installed,
# and calls yum to install the required packages.
# $1: the rpm channel to fall back to when the install channel is not a
# version-shaped channel.
do_install_rpm() {
maj_ver="7"
if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
# Reduce e.g. "7.9" to its major version "7".
maj_ver=$(echo "$dist_version" | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
case ${maj_ver} in
7|8)
:
;;
*) # In certain cases, like installing on Fedora, maj_ver will end up being something that is not 7 or 8
maj_ver="7"
;;
esac
fi
case "${INSTALL_RKE2_CHANNEL}" in
v*.*)
# We are operating with a version-based channel, so we should parse our version out
rke2_majmin=$(echo "${INSTALL_RKE2_CHANNEL}" | sed -E -e "s/^v([0-9]+\.[0-9]+).*/\1/")
rke2_rpm_channel=$(echo "${INSTALL_RKE2_CHANNEL}" | sed -E -e "s/^v[0-9]+\.[0-9]+-(.*)/\1/")
# If our regex fails to capture a "sane" channel out of the specified channel, fall back to `stable`
if [ "${rke2_rpm_channel}" = ${INSTALL_RKE2_CHANNEL} ]; then
info "using stable RPM repositories"
rke2_rpm_channel="stable"
fi
;;
*)
get_release_version
rke2_majmin=$(echo "${INSTALL_RKE2_VERSION}" | sed -E -e "s/^v([0-9]+\.[0-9]+).*/\1/")
rke2_rpm_channel=${1}
;;
esac
info "using ${rke2_majmin} series from channel ${rke2_rpm_channel}"
rpm_site="rpm.rancher.io"
if [ "${rke2_rpm_channel}" = "testing" ]; then
rpm_site="rpm-${rke2_rpm_channel}.rancher.io"
fi
# Replace any stale repo definitions with one matching this channel/series.
rm -f /etc/yum.repos.d/rancher-rke2*.repo
cat <<-EOF >"/etc/yum.repos.d/rancher-rke2.repo"
[rancher-rke2-common-${rke2_rpm_channel}]
name=Rancher RKE2 Common (${1})
baseurl=https://${rpm_site}/rke2/${rke2_rpm_channel}/common/centos/${maj_ver}/noarch
enabled=1
gpgcheck=1
gpgkey=https://${rpm_site}/public.key
[rancher-rke2-${rke2_majmin}-${rke2_rpm_channel}]
name=Rancher RKE2 ${rke2_majmin} (${1})
baseurl=https://${rpm_site}/rke2/${rke2_rpm_channel}/${rke2_majmin}/centos/${maj_ver}/x86_64
enabled=1
gpgcheck=1
gpgkey=https://${rpm_site}/public.key
EOF
if [ -z "${INSTALL_RKE2_VERSION}" ]; then
yum -y install "rke2-${INSTALL_RKE2_TYPE}"
else
# RPM versions use ~ where the tag uses + or -, and drop the leading v.
rke2_rpm_version=$(echo "${INSTALL_RKE2_VERSION}" | sed -E -e "s/[\+-]/~/g" | sed -E -e "s/v(.*)/\1/")
if [ -n "${INSTALL_RKE2_RPM_RELEASE_VERSION}" ]; then
yum -y install "rke2-${INSTALL_RKE2_TYPE}-${rke2_rpm_version}-${INSTALL_RKE2_RPM_RELEASE_VERSION}"
else
yum -y install "rke2-${INSTALL_RKE2_TYPE}-${rke2_rpm_version}"
fi
fi
}
# do_install_tar stages (from INSTALL_RKE2_ARTIFACT_PATH) or downloads the
# release artifacts, verifies their checksums, and unpacks them.
# NOTE(review): the channel argument passed by do_install is unused here;
# get_release_version reads INSTALL_RKE2_CHANNEL directly.
do_install_tar() {
setup_tmp
if [ -n "${INSTALL_RKE2_ARTIFACT_PATH}" ]; then
stage_local_checksums
stage_local_airgap_tarball
stage_local_tarball
else
get_release_version
info "using ${INSTALL_RKE2_VERSION:-commit $INSTALL_RKE2_COMMIT} as release"
download_airgap_checksums
download_airgap_tarball
download_checksums
download_tarball
fi
verify_airgap_tarball
install_airgap_tarball
verify_tarball
unpack_tarball
systemctl daemon-reload
}
# do_install is the entry point: validates the environment, picks the
# install method (rpm vs tar) and dispatches to it.
do_install() {
setup_env
check_method_conflict
setup_arch
# A downloader is only required when not installing from local artifacts.
if [ -z "${INSTALL_RKE2_ARTIFACT_PATH}" ]; then
verify_downloader curl || verify_downloader wget || fatal "can not find curl or wget for downloading files"
fi
case ${INSTALL_RKE2_METHOD} in
yum | rpm | dnf)
do_install_rpm "${INSTALL_RKE2_CHANNEL}"
;;
*)
do_install_tar "${INSTALL_RKE2_CHANNEL}"
;;
esac
}
do_install
exit 0

View File

@@ -1,173 +0,0 @@
package driver
import (
"bufio"
"context"
_ "embed"
"fmt"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/imdario/mergo"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"k8s.io/apimachinery/pkg/runtime/schema"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"sigs.k8s.io/cli-utils/pkg/object"
"sigs.k8s.io/yaml"
)
const (
k3sReleaseUrl = "https://github.com/k3s-io/k3s/releases/download"
)
// k3sInit holds the embedded k3s installer script; Start writes it to disk
// and executes it.
//go:embed embed/k3s-init.sh
var k3sInit string
// K3s is the k3s cluster driver: a version to install plus its config.
type K3s struct {
Version string
Config K3sConfig
}
//TODO: Would be nice if these just pointed to k3s/pkg/cli/cmds
// K3sConfig mirrors a subset of the k3s configuration file (config.yaml);
// the json tags match the corresponding k3s CLI flag names.
type K3sConfig struct {
DataDir string `json:"data-dir,omitempty"`
KubeConfig string `json:"write-kubeconfig,omitempty"`
KubeConfigMode string `json:"write-kubeconfig-mode,omitempty"`
Disable []string `json:"disable,omitempty"`
}
// NewK3s returns a new k3s driver preloaded with the default data dir and
// kubeconfig settings.
func NewK3s() K3s {
	//TODO: Allow for configuration overrides
	cfg := K3sConfig{
		DataDir:        "/var/lib/rancher/k3s",
		KubeConfig:     "/etc/rancher/k3s/k3s.yaml",
		KubeConfigMode: "0644",
		Disable:        []string{},
	}
	return K3s{Config: cfg}
}
// Name returns the driver identifier.
func (k K3s) Name() string { return "k3s" }

// KubeConfigPath returns the path the driver's kubeconfig is written to.
func (k K3s) KubeConfigPath() string { return k.Config.KubeConfig }

// DataPath joins the given path elements onto the driver's data directory.
func (k K3s) DataPath(elem ...string) string {
	parts := append([]string{k.Config.DataDir}, elem...)
	return filepath.Join(parts...)
}
// WriteConfig merges the driver's default config with any user-supplied
// config.yaml already on disk and writes the result back to
// <dir(kubeconfig)>/config.yaml.
func (k K3s) WriteConfig() error {
	kCfgPath := filepath.Dir(k.Config.KubeConfig)
	if err := os.MkdirAll(kCfgPath, os.ModePerm); err != nil {
		return err
	}
	// Round-trip the typed config through YAML into a generic map so it can
	// be merged with arbitrary user keys.
	data, err := yaml.Marshal(k.Config)
	if err != nil { // fix: the marshal error was previously ignored
		return err
	}
	c := make(map[string]interface{})
	if err := yaml.Unmarshal(data, &c); err != nil {
		return err
	}
	var uc map[string]interface{}
	path := filepath.Join(kCfgPath, "config.yaml")
	// fix: the original parsed the file only when ReadFile FAILED
	// (err != nil), so an existing user config was never actually loaded.
	if data, err := os.ReadFile(path); err == nil {
		if err := yaml.Unmarshal(data, &uc); err != nil {
			return err
		}
	}
	//Merge with user defined configs taking precedence
	// NOTE(review): mergo.Merge by default only fills empty keys in c;
	// confirm user values actually win, or use mergo.WithOverride.
	if err := mergo.Merge(&c, uc); err != nil {
		return err
	}
	mergedData, err := yaml.Marshal(&c)
	if err != nil {
		return err
	}
	return os.WriteFile(path, mergedData, 0644)
}
// Images returns the remote image references required by this k3s version,
// resolved from the published k3s-images.txt manifest.
// NOTE(review): ctx is currently unused; listImages issues a plain http.Get.
func (k K3s) Images(ctx context.Context) (map[name.Reference]v1.Image, error) {
imgs, err := k.listImages()
if err != nil {
return nil, err
}
return images.ResolveRemoteRefs(imgs...)
}
// Binary fetches the k3s executable for the configured version from the
// GitHub release downloads. The caller is responsible for closing the
// returned ReadCloser.
func (k K3s) Binary() (io.ReadCloser, error) {
	u, err := url.Parse(fmt.Sprintf("%s/%s/%s", k3sReleaseUrl, k.Version, k.Name()))
	if err != nil {
		return nil, err
	}
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("failed to return executable for k3s %s from %s", k.Version, u.String())
	}
	// fix: on a non-200 response the body was previously leaked
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, fmt.Errorf("failed to return executable for k3s %s from %s", k.Version, u.String())
	}
	return resp.Body, nil
}
// SystemObjects returns the object.ObjMetadata entries required for the
// driver to be functional and accept new resources. Hauler's bootstrapping
// sequence always waits for these to reach Ready before proceeding.
func (k K3s) SystemObjects() []object.ObjMetadata {
	deps := []string{"coredns"}
	objs := make([]object.ObjMetadata, 0, len(deps))
	for _, dep := range deps {
		m, _ := object.CreateObjMetadata("kube-system", dep, schema.GroupKind{Kind: "Deployment", Group: "apps"})
		objs = append(objs, m)
	}
	return objs
}
// Start writes the embedded installer script to /opt/hauler/bin and runs
// it via /bin/sh, streaming installer stdout to out. The env vars force
// the installer to use the pre-staged binary under /opt/hauler/bin instead
// of downloading one.
func (k K3s) Start(out io.Writer) error {
if err := os.WriteFile("/opt/hauler/bin/k3s-init.sh", []byte(k3sInit), 0755); err != nil {
return err
}
cmd := exec.Command("/bin/sh", "/opt/hauler/bin/k3s-init.sh")
cmd.Env = append(os.Environ(), []string{
"INSTALL_K3S_SKIP_DOWNLOAD=true",
"INSTALL_K3S_SELINUX_WARN=true",
"INSTALL_K3S_SKIP_SELINUX_RPM=true",
"INSTALL_K3S_BIN_DIR=/opt/hauler/bin",
//TODO: Provide a real dryrun option
//"INSTALL_K3S_SKIP_START=true",
}...)
cmd.Stdout = out
return cmd.Run()
}
// listImages downloads the published k3s-images.txt manifest for the
// configured version and returns one image reference per line.
func (k K3s) listImages() ([]string, error) {
	u, err := url.Parse(fmt.Sprintf("%s/%s/k3s-images.txt", k3sReleaseUrl, k.Version))
	if err != nil {
		return nil, err
	}
	resp, err := http.Get(u.String())
	if err != nil {
		return nil, fmt.Errorf("failed to return images for k3s %s from %s", k.Version, u.String())
	}
	defer resp.Body.Close()
	// fix: a non-200 response previously returned without closing the body
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to return images for k3s %s from %s", k.Version, u.String())
	}
	var imgs []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		imgs = append(imgs, scanner.Text())
	}
	// fix: scanner errors (e.g. a truncated response) were silently dropped
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return imgs, nil
}

View File

@@ -1,211 +0,0 @@
package fs
import (
"fmt"
"github.com/rancherfederal/hauler/pkg/packager/images"
"io"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
fleetapi "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
"github.com/spf13/afero"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"k8s.io/apimachinery/pkg/util/json"
)
// PkgFs is a package-rooted filesystem view: FS is scoped to the package
// directory, and root records that directory's absolute host path so that
// host-visible paths can be rebuilt via Path().
type PkgFs struct {
	FS *afero.BasePathFs
	// root is the absolute host path of the package directory.
	root string
}
// NewPkgFS constructs a PkgFs rooted at dir, backed by the host filesystem.
func NewPkgFS(dir string) PkgFs {
	base := afero.NewBasePathFs(afero.NewOsFs(), dir).(*afero.BasePathFs)
	// TODO: absolutely no way this'll bite us in the butt later...
	abs, _ := filepath.Abs(dir)
	return PkgFs{
		FS:   base,
		root: abs,
	}
}
// Path joins elem onto the package root and returns the absolute host path.
func (p PkgFs) Path(elem ...string) string {
	parts := append([]string{p.root}, elem...)
	return filepath.Join(parts...)
}
// Bundle returns a PkgFs view scoped to the package's bundles subdirectory.
func (p PkgFs) Bundle() PkgFs {
	sub := afero.NewBasePathFs(p.FS, v1alpha1.BundlesDir).(*afero.BasePathFs)
	return PkgFs{FS: sub, root: p.Path(v1alpha1.BundlesDir)}
}
// Image returns a PkgFs view scoped to the package's OCI layout subdirectory.
func (p PkgFs) Image() PkgFs {
	sub := afero.NewBasePathFs(p.FS, v1alpha1.LayoutDir).(*afero.BasePathFs)
	return PkgFs{FS: sub, root: p.Path(v1alpha1.LayoutDir)}
}
// Bin returns a PkgFs view scoped to the package's binaries subdirectory.
func (p PkgFs) Bin() PkgFs {
	sub := afero.NewBasePathFs(p.FS, v1alpha1.BinDir).(*afero.BasePathFs)
	return PkgFs{FS: sub, root: p.Path(v1alpha1.BinDir)}
}
// Chart returns a PkgFs view scoped to the package's charts subdirectory.
func (p PkgFs) Chart() PkgFs {
	sub := afero.NewBasePathFs(p.FS, v1alpha1.ChartDir).(*afero.BasePathFs)
	return PkgFs{FS: sub, root: p.Path(v1alpha1.ChartDir)}
}
// AddBundle will add a bundle to a package and return all images that are
// autodetected from it.
func (p PkgFs) AddBundle(b *fleetapi.Bundle) (map[name.Reference]v1.Image, error) {
	if err := p.mkdirIfNotExists(v1alpha1.BundlesDir, os.ModePerm); err != nil {
		return nil, err
	}
	data, err := json.Marshal(b)
	if err != nil {
		return nil, err
	}
	fname := fmt.Sprintf("%s.json", b.Name)
	if err := p.Bundle().WriteFile(fname, data, 0644); err != nil {
		return nil, err
	}
	return images.ImageMapFromBundle(b)
}
// AddBin streams r into an executable file called name under the package's bin
// directory.
func (p PkgFs) AddBin(r io.Reader, name string) error {
	if err := p.mkdirIfNotExists(v1alpha1.BinDir, os.ModePerm); err != nil {
		return err
	}
	f, err := p.Bin().FS.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return err
	}
	_, err = io.Copy(f, r)
	// Close the handle in all paths — the original never closed it — and
	// surface the close error when the copy itself succeeded.
	if cerr := f.Close(); cerr != nil && err == nil {
		err = cerr
	}
	return err
}
// AddImage will add an image to the pkgfs in OCI layout fmt, annotating the
// index entry with the image's fully-qualified reference.
// TODO: Extra work is done to ensure this is unique within the index.json
func (p PkgFs) AddImage(ref name.Reference, img v1.Image) error {
	if err := p.mkdirIfNotExists(v1alpha1.LayoutDir, os.ModePerm); err != nil {
		return err
	}
	lp, err := p.layout()
	if err != nil {
		return err
	}
	refAnnotation := map[string]string{
		ocispec.AnnotationRefName: ref.Name(),
	}
	//TODO: Change to ReplaceImage
	return lp.AppendImage(img, layout.WithAnnotations(refAnnotation))
}
// AddChart downloads the chart at ref (pinned to version) into the package's
// chart directory.
// TODO: Not very robust
// For ref: https://github.com/helm/helm/blob/bf486a25cdc12017c7dac74d1582a8a16acd37ea/pkg/action/pull.go#L75
func (p PkgFs) AddChart(ref string, version string) error {
	if err := p.mkdirIfNotExists(v1alpha1.ChartDir, os.ModePerm); err != nil {
		return err
	}
	dl := downloader.ChartDownloader{
		Out:     nil,
		Verify:  downloader.VerifyNever,
		Getters: getter.All(cli.New()), // TODO: Probably shouldn't do this...
		Options: []getter.Option{
			getter.WithInsecureSkipVerifyTLS(true),
		},
	}
	_, _, err := dl.DownloadTo(ref, version, p.Chart().Path())
	return err
}
// layout opens the package's OCI layout, creating an empty index on first use.
func (p PkgFs) layout() (layout.Path, error) {
	dir := p.Image().Path(".")
	lp, err := layout.FromPath(dir)
	if os.IsNotExist(err) {
		return layout.Write(dir, empty.Index)
	}
	return lp, err
}
// WriteFile is a helper method to write a file within the PkgFs. It creates or
// truncates name with the given permissions, preferring the write error over a
// close error when both occur.
func (p PkgFs) WriteFile(name string, data []byte, perm os.FileMode) error {
	f, openErr := p.FS.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if openErr != nil {
		return openErr
	}
	_, writeErr := f.Write(data)
	closeErr := f.Close()
	if writeErr != nil {
		return writeErr
	}
	return closeErr
}
// MapLayout reads the package's OCI layout index and returns a map of parsed
// image references (from the ref-name annotation) to their images.
// TODO: Factor this out to a Store interface
func (p PkgFs) MapLayout() (map[name.Reference]v1.Image, error) {
	imgRefs := make(map[name.Reference]v1.Image)
	lp, err := p.layout()
	if err != nil {
		return nil, err
	}
	ii, err := lp.ImageIndex()
	if err != nil {
		// The original ignored this error and would dereference a nil index
		// on a corrupt layout.
		return nil, err
	}
	im, err := ii.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, m := range im.Manifests {
		ref, err := name.ParseReference(m.Annotations[ocispec.AnnotationRefName])
		if err != nil {
			return nil, err
		}
		img, err := lp.Image(m.Digest)
		if err != nil {
			return nil, err
		}
		imgRefs[ref] = img
	}
	return imgRefs, nil
}
// mkdirIfNotExists ensures dir exists inside the package filesystem.
// MkdirAll is already a no-op for an existing directory, so the original's
// Stat-then-create sequence was both racy (TOCTOU) and lossy: it silently
// swallowed non-NotExist Stat errors (e.g. permission denied) and mixed a
// host-path os.Stat with an afero-scoped MkdirAll.
func (p PkgFs) mkdirIfNotExists(dir string, perm os.FileMode) error {
	return p.FS.MkdirAll(dir, perm)
}

View File

@@ -1,39 +0,0 @@
package kube
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"path/filepath"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// NewKubeConfig builds a *rest.Config by loading the first available kubeconfig
// from the well-known k3s and rke2 locations, warning if none are found.
func NewKubeConfig() (*rest.Config, error) {
	rules := &clientcmd.ClientConfigLoadingRules{
		Precedence: []string{
			filepath.Join("/etc/rancher/k3s/k3s.yaml"),
			filepath.Join("/etc/rancher/rke2/rke2.yaml"),
		},
		WarnIfAllMissing: true,
	}
	overrides := &clientcmd.ConfigOverrides{}
	deferred := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
	return deferred.ClientConfig()
}
//NewClient returns a fresh kube client
// NOTE(review): the scheme passed below is empty — runtime.NewScheme() with no
// AddToScheme registrations visible here — so typed reads/writes through this
// client may fail to resolve GVKs; confirm whether callers register types.
func NewClient() (client.Client, error) {
	cfg, err := NewKubeConfig()
	if err != nil {
		return nil, err
	}
	scheme := runtime.NewScheme()
	return client.New(cfg, client.Options{
		Scheme: scheme,
	})
}

Some files were not shown because too many files have changed in this diff Show More