add air-gap support (#359)

* add airgap support
* add airgap howto guide
This commit is contained in:
jpgouin
2025-05-27 10:13:07 +02:00
committed by GitHub
parent fdb5bb9c19
commit 2b1448ffb8
9 changed files with 151 additions and 31 deletions

View File

@@ -26,6 +26,10 @@ spec:
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
- name: K3S_IMAGE
value: {{ .Values.k3sServer.image.repository }}
- name: K3S_IMAGE_PULL_POLICY
value: {{ .Values.k3sServer.image.pullPolicy }}
ports:
- containerPort: 8080
name: https

View File

@@ -27,3 +27,8 @@ sharedAgent:
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# image registry configuration related to the k3s server
k3sServer:
image:
repository: "rancher/k3s"
pullPolicy: ""

83
howtos/airgap.md Normal file
View File

@@ -0,0 +1,83 @@
# K3k Air Gap Installation Guide
Applicable K3k modes: `virtual`, `shared`
This guide describes how to deploy **K3k** in an **air-gapped environment**, including the packaging of required images, Helm chart configurations, and cluster creation using a private container registry.
---
## 1. Package Required Container Images
### 1.1: Follow K3s Air Gap Preparation
Begin with the official K3s air gap packaging instructions:
[K3s Air Gap Installation Docs](https://docs.k3s.io/installation/airgap)
### 1.2: Include K3k-Specific Images
In addition to the K3s images, make sure to include the following in your image bundle:
| Image Names | Descriptions |
| --------------------------- | --------------------------------------------------------------- |
| `rancher/k3k:<tag>` | K3k controller image (replace `<tag>` with the desired version) |
| `rancher/k3k-kubelet:<tag>` | K3k agent image for shared mode |
| `rancher/k3s:<tag>` | K3s server/agent image for virtual clusters |
Load these images into your internal (air-gapped) registry.
---
## 2. Configure Helm Chart for Air Gap Installation
Update the `values.yaml` file in the K3k Helm chart with air gap settings:
```yaml
image:
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
sharedAgent:
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
k3sServer:
image:
repository: rancher/k3s
pullPolicy: "" # Optional
```
These values enforce the use of internal image repositories for the K3k controller, the agent, and the server.
**Note**: All virtual clusters will automatically use these settings.
---
## 3. Enforce Registry in Virtual Clusters
When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry:
```bash
k3kcli cluster create \
--server-args "--system-default-registry=registry.internal.domain" \
my-cluster
```
This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls.
[K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help)
---
## 4. Specify K3s Version for Virtual Clusters
K3k allows specifying the K3s version used in each virtual cluster:
```bash
k3kcli cluster create \
--k3s-version v1.29.4+k3s1 \
my-cluster
```
- If omitted, the **host cluster's K3s version** will be used by default, which might not exist if it's not part of the air gap package.

17
main.go
View File

@@ -31,6 +31,8 @@ var (
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
k3SImage string
k3SImagePullPolicy string
debug bool
logger *log.Logger
flags = []cli.Flag{
@@ -65,6 +67,19 @@ var (
Usage: "Debug level logging",
Destination: &debug,
},
&cli.StringFlag{
Name: "k3s-image",
EnvVars: []string{"K3S_IMAGE"},
Usage: "K3K server image",
Value: "rancher/k3k",
Destination: &k3SImage,
},
&cli.StringFlag{
Name: "k3s-image-pull-policy",
EnvVars: []string{"K3S_IMAGE_PULL_POLICY"},
Usage: "K3K server image pull policy",
Destination: &k3SImagePullPolicy,
},
}
)
@@ -115,7 +130,7 @@ func run(clx *cli.Context) error {
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}

View File

@@ -20,15 +20,19 @@ const (
type VirtualAgent struct {
*Config
serviceIP string
token string
serviceIP string
token string
k3SImage string
k3SImagePullPolicy string
}
func NewVirtualAgent(config *Config, serviceIP, token string) *VirtualAgent {
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
return &VirtualAgent{
Config: config,
serviceIP: serviceIP,
token: token,
Config: config,
serviceIP: serviceIP,
token: token,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
}
}
@@ -78,7 +82,7 @@ with-node-id: true`, serviceIP, token)
}
func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster)
image := controller.K3SImage(v.cluster, v.k3SImage)
const name = "k3k-agent"
@@ -175,8 +179,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},

View File

@@ -56,10 +56,12 @@ type ClusterReconciler struct {
Scheme *runtime.Scheme
SharedAgentImage string
SharedAgentImagePullPolicy string
K3SImage string
K3SImagePullPolicy string
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string) error {
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, k3SImage string, k3SImagePullPolicy string) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
@@ -76,6 +78,8 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
K3SImage: k3SImage,
K3SImagePullPolicy: k3SImagePullPolicy,
}
return ctrl.NewControllerManagedBy(mgr).
@@ -171,7 +175,7 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode))
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode), c.K3SImage, c.K3SImagePullPolicy)
cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
@@ -526,7 +530,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
var agentEnsurer agent.ResourceEnsurer
if cluster.Spec.Mode == agent.VirtualNodeMode {
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token)
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
} else {
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
}

View File

@@ -60,7 +60,7 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "")
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "")
Expect(err).NotTo(HaveOccurred())
go func() {

View File

@@ -28,18 +28,22 @@ const (
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
k3SImage string
k3SImagePullPolicy string
}
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *Server {
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string, k3SImage string, k3SImagePullPolicy string) *Server {
return &Server{
cluster: cluster,
client: client,
token: token,
mode: mode,
cluster: cluster,
client: client,
token: token,
mode: mode,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
}
}
@@ -109,8 +113,9 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -244,7 +249,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
persistent bool
)
image := controller.K3SImage(s.cluster)
image := controller.K3SImage(s.cluster, s.k3SImage)
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
replicas = *s.cluster.Spec.Servers

View File

@@ -12,7 +12,6 @@ import (
const (
namePrefix = "k3k"
k3SImageName = "rancher/k3s"
AdminCommonName = "system:admin"
)
@@ -27,16 +26,16 @@ var Backoff = wait.Backoff{
// K3SImage returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use the same k8s version as the host cluster,
// stored in the Status object. It will return the untagged image as a last fallback.
func K3SImage(cluster *v1alpha1.Cluster) string {
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
if cluster.Spec.Version != "" {
return k3SImageName + ":" + cluster.Spec.Version
return k3SImage + ":" + cluster.Spec.Version
}
if cluster.Status.HostVersion != "" {
return k3SImageName + ":" + cluster.Status.HostVersion
return k3SImage + ":" + cluster.Status.HostVersion
}
return k3SImageName
return k3SImage
}
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.