mirror of https://github.com/hauler-dev/hauler.git
synced 2026-03-01 17:20:26 +00:00

Compare commits: main ... chunk-the-
1 commit: 3cd32fc210

.github/workflows/tests.yaml (vendored): 17 changes
@@ -250,6 +250,13 @@ jobs:
           hauler store save --filename store.tar.zst
           # verify via save with filename and platform (amd64)
           hauler store save --filename store-amd64.tar.zst --platform linux/amd64
+          # verify via save with chunk-size (splits into haul-chunked_0.tar.zst, haul-chunked_1.tar.zst, ...)
+          hauler store save --filename haul-chunked.tar.zst --chunk-size 50M
+          # verify chunk files exist and original is removed
+          ls haul-chunked_*.tar.zst
+          ! test -f haul-chunked.tar.zst
+          # verify at least two chunks were produced
+          [ $(ls haul-chunked_*.tar.zst | wc -l) -ge 2 ]
 
       - name: Remove Hauler Store Contents
         run: |
@@ -269,6 +276,14 @@
           hauler store load --filename store.tar.zst --tempdir /opt
           # verify via load with filename and platform (amd64)
           hauler store load --filename store-amd64.tar.zst
+          # verify via load from chunks using explicit first chunk
+          rm -rf store
+          hauler store load --filename haul-chunked_0.tar.zst
+          hauler store info
+          # verify via load from chunks using base filename (auto-detect)
+          rm -rf store
+          hauler store load --filename haul-chunked.tar.zst
+          hauler store info
 
       - name: Verify Hauler Store Contents
         run: |
@@ -291,7 +306,7 @@
 
       - name: Remove Hauler Store Contents
         run: |
-          rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst
+          rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst haul-chunked_*.tar.zst
           hauler store info
 
       - name: Verify - hauler store sync
@@ -39,8 +39,9 @@ func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, r
 	l.Debugf("using temporary directory at [%s]", tempDir)
 
 	for _, fileName := range o.FileName {
-		l.Infof("loading haul [%s] to [%s]", fileName, o.StoreDir)
-		err := unarchiveLayoutTo(ctx, fileName, o.StoreDir, tempDir)
+		resolved := resolveHaulPath(fileName)
+		l.Infof("loading haul [%s] to [%s]", resolved, o.StoreDir)
+		err := unarchiveLayoutTo(ctx, resolved, o.StoreDir, tempDir)
 		if err != nil {
 			return err
 		}
@@ -85,6 +86,13 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi
 		}
 	}
 
+	// reassemble chunk files if haulPath matches the chunk naming pattern
+	joined, err := archives.JoinChunks(ctx, haulPath, tempDir)
+	if err != nil {
+		return err
+	}
+	haulPath = joined
+
 	if err := archives.Unarchive(ctx, haulPath, tempDir); err != nil {
 		return err
 	}
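Read together with the resolveHaulPath hunk that follows, the load path now has three stages: resolve the user-supplied filename (falling back to the first matching chunk on disk), let archives.JoinChunks reassemble sibling chunks into the temp directory when the name matches the <base>_N<ext> pattern, and only then unarchive as before. For an ordinary single-file haul, JoinChunks returns the path unchanged, so the existing flow is untouched.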
@@ -139,6 +147,29 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi
 	return err
 }
 
+// resolveHaulPath returns path as-is if it exists or is a URL. If the file is
+// not found, it globs for chunk files matching <base>_*<ext> in the same
+// directory and returns the first match so JoinChunks can reassemble them.
+func resolveHaulPath(path string) string {
+	if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
+		return path
+	}
+	if _, err := os.Stat(path); err == nil {
+		return path
+	}
+	base := path
+	ext := ""
+	for filepath.Ext(base) != "" {
+		ext = filepath.Ext(base) + ext
+		base = strings.TrimSuffix(base, filepath.Ext(base))
+	}
+	matches, err := filepath.Glob(base + "_*" + ext)
+	if err != nil || len(matches) == 0 {
+		return path
+	}
+	return matches[0]
+}
+
 func clearDir(path string) error {
 	entries, err := os.ReadDir(path)
 	if err != nil {
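To make the fallback concrete, here is a minimal standalone sketch of the same extension-stripping and glob logic (the temp directory and file names are invented for illustration; only the loop and glob mirror resolveHaulPath):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir, err := os.MkdirTemp("", "haul-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// simulate chunks on disk; the base archive itself does not exist
	for i := 0; i < 3; i++ {
		name := fmt.Sprintf("haul-chunked_%d.tar.zst", i)
		if err := os.WriteFile(filepath.Join(dir, name), nil, 0o644); err != nil {
			panic(err)
		}
	}

	// the requested haul is missing...
	path := filepath.Join(dir, "haul-chunked.tar.zst")

	// ...so strip the compound extension (.zst, then .tar) and glob for siblings
	base, ext := path, ""
	for filepath.Ext(base) != "" {
		ext = filepath.Ext(base) + ext
		base = strings.TrimSuffix(base, filepath.Ext(base))
	}
	matches, _ := filepath.Glob(base + "_*" + ext)
	fmt.Println(matches[0]) // .../haul-chunked_0.tar.zst
}
```

Note that filepath.Glob returns lexical order, so matches[0] here is the _0 chunk; JoinChunks later re-sorts all siblings numerically, so the pick only needs to be some chunk from the set.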
@@ -4,10 +4,13 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"os"
 	"path"
+	"path/filepath"
 	"slices"
+	"strconv"
 	"strings"
 
 	referencev3 "github.com/distribution/distribution/v3/reference"
 	"github.com/google/go-containerregistry/pkg/name"
@@ -72,10 +75,47 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, r
 		return err
 	}
 
-	l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName)
+	if o.ChunkSize != "" {
+		maxBytes, err := parseChunkSize(o.ChunkSize)
+		if err != nil {
+			return err
+		}
+		chunks, err := archives.SplitArchive(ctx, absOutputfile, maxBytes)
+		if err != nil {
+			return err
+		}
+		for _, c := range chunks {
+			l.Infof("saving store [%s] to chunk [%s]", o.StoreDir, filepath.Base(c))
+		}
+	} else {
+		l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName)
+	}
 
 	return nil
 }
 
+// parseChunkSize parses a human-readable byte size string (e.g. "1G", "500M", "2GB")
+// into a byte count. Suffixes are treated as binary units (1K = 1024).
+func parseChunkSize(s string) (int64, error) {
+	units := map[string]int64{
+		"K": 1 << 10, "KB": 1 << 10,
+		"M": 1 << 20, "MB": 1 << 20,
+		"G": 1 << 30, "GB": 1 << 30,
+		"T": 1 << 40, "TB": 1 << 40,
+	}
+	s = strings.ToUpper(strings.TrimSpace(s))
+	for suffix, mult := range units {
+		if strings.HasSuffix(s, suffix) {
+			n, err := strconv.ParseInt(strings.TrimSuffix(s, suffix), 10, 64)
+			if err != nil {
+				return 0, fmt.Errorf("invalid chunk size %q", s)
+			}
+			return n * mult, nil
+		}
+	}
+	return strconv.ParseInt(s, 10, 64)
+}
+
 type exports struct {
 	digests []string
 	records map[string]tarball.Descriptor
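As a quick sanity check on the unit arithmetic (worked examples only, not tests from the repo) — and note that a bare number with no suffix falls through to strconv.ParseInt and is taken as raw bytes:

```go
package main

import "fmt"

func main() {
	// binary units, as parseChunkSize defines them: 1K = 1024
	fmt.Println(int64(50) << 20)   // "50M"   -> 52428800 bytes
	fmt.Println(int64(1) << 30)    // "1G"    -> 1073741824 bytes
	fmt.Println(int64(2048) << 20) // "2048M" -> 2147483648 bytes, same as "2G"
}
```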
@@ -10,6 +10,7 @@ type SaveOpts struct {
 	FileName                string
 	Platform                string
 	ContainerdCompatibility bool
+	ChunkSize               string
 }
 
 func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
@@ -18,5 +19,6 @@ func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
 	f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul")
 	f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)")
 	f.BoolVar(&o.ContainerdCompatibility, "containerd", false, "(Optional) Enable import compatibility with containerd... removes oci-layout from the haul")
+	f.StringVar(&o.ChunkSize, "chunk-size", "", "(Optional) Split the output archive into chunks of the specified size (e.g. 1G, 500M, 2048M)")
 }
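With the flag wired up, the workflow steps above exercise it end to end: `hauler store save --filename haul-chunked.tar.zst --chunk-size 50M` writes haul-chunked_0.tar.zst, haul-chunked_1.tar.zst, ... and removes the single-file original. With the default haul name the chunks would presumably come out as haul_0.tar.zst, haul_1.tar.zst, and so on, following the same <base>_N<ext> scheme.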
@@ -3,8 +3,10 @@ package archives
 
 import (
 	"context"
 	"fmt"
+	"io"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/mholt/archives"
 	"hauler.dev/go/hauler/pkg/log"
@@ -102,3 +104,85 @@ func Archive(ctx context.Context, dir, outfile string, compression archives.Comp
 	l.Debugf("archive created successfully [%s]", outfile)
 	return nil
 }
 
+// SplitArchive splits an existing archive into chunks of at most maxBytes each.
+// Chunks are named <base>_0<ext>, <base>_1<ext>, ... where base is the archive
+// path with all extensions stripped, and ext is the compound extension (e.g. .tar.zst).
+// The original archive is removed after successful splitting.
+func SplitArchive(ctx context.Context, archivePath string, maxBytes int64) ([]string, error) {
+	l := log.FromContext(ctx)
+
+	// derive base path and compound extension by stripping all extensions
+	base := archivePath
+	ext := ""
+	for filepath.Ext(base) != "" {
+		ext = filepath.Ext(base) + ext
+		base = strings.TrimSuffix(base, filepath.Ext(base))
+	}
+
+	f, err := os.Open(archivePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open archive for splitting: %w", err)
+	}
+
+	var chunks []string
+	buf := make([]byte, 32*1024)
+	chunkIdx := 0
+	var written int64
+	var outf *os.File
+
+	for {
+		if outf == nil {
+			chunkPath := fmt.Sprintf("%s_%d%s", base, chunkIdx, ext)
+			outf, err = os.Create(chunkPath)
+			if err != nil {
+				f.Close()
+				return nil, fmt.Errorf("failed to create chunk %d: %w", chunkIdx, err)
+			}
+			chunks = append(chunks, chunkPath)
+			l.Debugf("creating chunk [%s]", chunkPath)
+			written = 0
+			chunkIdx++
+		}
+
+		remaining := maxBytes - written
+		readSize := int64(len(buf))
+		if readSize > remaining {
+			readSize = remaining
+		}
+
+		n, readErr := f.Read(buf[:readSize])
+		if n > 0 {
+			if _, writeErr := outf.Write(buf[:n]); writeErr != nil {
+				outf.Close()
+				f.Close()
+				return nil, fmt.Errorf("failed to write to chunk: %w", writeErr)
+			}
+			written += int64(n)
+		}
+
+		if readErr == io.EOF {
+			outf.Close()
+			outf = nil
+			break
+		}
+		if readErr != nil {
+			outf.Close()
+			f.Close()
+			return nil, fmt.Errorf("failed to read archive: %w", readErr)
+		}
+
+		if written >= maxBytes {
+			outf.Close()
+			outf = nil
+		}
+	}
+
+	f.Close()
+	if err := os.Remove(archivePath); err != nil {
+		return nil, fmt.Errorf("failed to remove original archive after splitting: %w", err)
+	}
+
+	l.Infof("split archive [%s] into %d chunk(s)", filepath.Base(archivePath), len(chunks))
+	return chunks, nil
+}
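The read loop caps each f.Read at the bytes remaining in the current chunk, so every chunk except the last comes out at exactly maxBytes. A back-of-the-envelope sketch of the expected chunk count (the 120 MiB figure is invented for illustration, to match the workflow's 50M setting):

```go
package main

import "fmt"

func main() {
	size := int64(120) << 20    // hypothetical 120 MiB archive
	max := int64(50) << 20      // --chunk-size 50M
	n := (size + max - 1) / max // ceiling division
	fmt.Println(n)              // 3 chunks: 50 MiB + 50 MiB + 20 MiB
}
```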
@@ -6,6 +6,9 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"regexp"
+	"sort"
+	"strconv"
 	"strings"
 
 	"github.com/mholt/archives"
@@ -156,3 +159,77 @@ func Unarchive(ctx context.Context, tarball, dst string) error {
 	l.Infof("unarchiving completed successfully")
 	return nil
 }
 
+var chunkSuffixRe = regexp.MustCompile(`^(.+)_(\d+)$`)
+
+// chunkInfo checks whether archivePath matches the chunk naming pattern (<base>_N<ext>).
+// Returns the base path (without index), compound extension, numeric index, and whether it matched.
+func chunkInfo(archivePath string) (base, ext string, index int, ok bool) {
+	dir := filepath.Dir(archivePath)
+	name := filepath.Base(archivePath)
+
+	// strip compound extension (e.g. .tar.zst)
+	nameBase := name
+	nameExt := ""
+	for filepath.Ext(nameBase) != "" {
+		nameExt = filepath.Ext(nameBase) + nameExt
+		nameBase = strings.TrimSuffix(nameBase, filepath.Ext(nameBase))
+	}
+
+	m := chunkSuffixRe.FindStringSubmatch(nameBase)
+	if m == nil {
+		return "", "", 0, false
+	}
+
+	idx, _ := strconv.Atoi(m[2])
+	return filepath.Join(dir, m[1]), nameExt, idx, true
+}
+
+// JoinChunks detects whether archivePath is a chunk file and, if so, finds all
+// sibling chunks, concatenates them in numeric order into a single file in tempDir,
+// and returns the path to the joined file. If archivePath is not a chunk, it is
+// returned unchanged.
+func JoinChunks(ctx context.Context, archivePath, tempDir string) (string, error) {
+	l := log.FromContext(ctx)
+
+	base, ext, _, ok := chunkInfo(archivePath)
+	if !ok {
+		return archivePath, nil
+	}
+
+	matches, err := filepath.Glob(base + "_*" + ext)
+	if err != nil || len(matches) == 0 {
+		return archivePath, nil
+	}
+
+	sort.Slice(matches, func(i, j int) bool {
+		_, _, idxI, _ := chunkInfo(matches[i])
+		_, _, idxJ, _ := chunkInfo(matches[j])
+		return idxI < idxJ
+	})
+
+	l.Debugf("joining %d chunk(s) for [%s]", len(matches), base)
+
+	joinedPath := filepath.Join(tempDir, filepath.Base(base)+ext)
+	outf, err := os.Create(joinedPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to create joined archive: %w", err)
+	}
+	defer outf.Close()
+
+	for _, chunk := range matches {
+		l.Debugf("joining chunk [%s]", chunk)
+		cf, err := os.Open(chunk)
+		if err != nil {
+			return "", fmt.Errorf("failed to open chunk [%s]: %w", chunk, err)
+		}
+		if _, err := io.Copy(outf, cf); err != nil {
+			cf.Close()
+			return "", fmt.Errorf("failed to copy chunk [%s]: %w", chunk, err)
+		}
+		cf.Close()
+	}
+
+	l.Infof("joined %d chunk(s) into [%s]", len(matches), filepath.Base(joinedPath))
+	return joinedPath, nil
+}
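The numeric sort matters because filepath.Glob returns lexical order, where an eleventh chunk (_10) would sort before _2 and corrupt the concatenation. A standalone sketch with made-up names (not the repo's code, but the same regex):

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
	"strconv"
)

var chunkSuffixRe = regexp.MustCompile(`^(.+)_(\d+)$`)

func main() {
	// hypothetical chunk basenames, compound extensions already stripped
	names := []string{"haul-chunked_10", "haul-chunked_2", "haul-chunked_0"}

	sort.Strings(names)
	fmt.Println(names) // [haul-chunked_0 haul-chunked_10 haul-chunked_2] — lexical, wrong join order

	sort.Slice(names, func(i, j int) bool {
		ni, _ := strconv.Atoi(chunkSuffixRe.FindStringSubmatch(names[i])[2])
		nj, _ := strconv.Atoi(chunkSuffixRe.FindStringSubmatch(names[j])[2])
		return ni < nj
	})
	fmt.Println(names) // [haul-chunked_0 haul-chunked_2 haul-chunked_10] — numeric, correct
}
```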