Compare commits

...

9 Commits

Author SHA1 Message Date
Enrico Candino
f5d2e981ab Bump Charts to 0.3.3-r3 (#391)
* bump charts to 0.3.3-r2

* bump charts to 0.3.3-r3
2025-06-20 18:19:10 +02:00
jpgouin
541f506d9d [CLI] add storage-request-size flag (#372)
[CLI] add storage-request-size flag
2025-06-20 17:13:47 +02:00
Enrico Candino
f389a4e2be Fix Network Policy reconciliation (#388)
* logs

* fix delete cleanup

* update spec

* added policyName to status, skip netpol for policy managed clusters
2025-06-20 16:10:47 +02:00
jpgouin
818328c9d4 fix-howto usage of serverEnvs and agentEnvs (#385) 2025-06-20 12:32:43 +02:00
Enrico Candino
0c4752039d Fix for k3kcli policy delete (#386)
* fix for delete policy

* fix docs
2025-06-20 12:08:51 +02:00
Enrico Candino
eca219cb48 kubelet controllers cleanup (#376) 2025-06-20 12:08:28 +02:00
Hussein Galal
d1f88c32b3 Ephemeral containers fix (#371)
* Update virtual kubelet and k8s to 1.31.4

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix ephemeral containers in provider

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix linters

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-20 12:52:45 +03:00
Enrico Candino
b8f0e77a71 fix for empty namespace (#375) 2025-06-19 14:28:27 +02:00
jpgouin
08ba3944e0 [DOC] add how-to create virtual clusters (#373)
* [DOC] add how-to create virtual clusters
2025-06-06 12:39:51 +02:00
32 changed files with 837 additions and 1744 deletions

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.3-r1
appVersion: v0.3.3-rc1
version: 0.3.3-r3
appVersion: v0.3.3-rc3

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.16.0
name: clusters.k3k.io
spec:
group: k3k.io
@@ -18,6 +18,9 @@ spec:
- jsonPath: .spec.mode
name: Mode
type: string
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
schema:
openAPIV3Schema:
@@ -102,10 +105,13 @@ spec:
description: The key to select.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the ConfigMap or its key
@@ -164,10 +170,13 @@ spec:
be a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the Secret or its key must
@@ -363,10 +372,13 @@ spec:
description: The key to select.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the ConfigMap or its key
@@ -425,10 +437,13 @@ spec:
be a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
optional:
description: Specify whether the Secret or its key must
@@ -540,6 +555,10 @@ spec:
required:
- type
type: object
policyName:
description: PolicyName specifies the virtual cluster policy name
bound to the virtual cluster.
type: string
serviceCIDR:
description: ServiceCIDR is the CIDR range for service IPs.
type: string

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.16.0
name: virtualclusterpolicies.k3k.io
spec:
group: k3k.io
@@ -146,6 +146,7 @@ spec:
- type
type: object
type: array
x-kubernetes-list-type: atomic
required:
- limits
type: object
@@ -204,11 +205,13 @@ spec:
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- operator
- scopeName
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
scopes:
@@ -220,6 +223,7 @@ spec:
match each object tracked by a quota
type: string
type: array
x-kubernetes-list-type: atomic
type: object
type: object
status:
@@ -229,16 +233,8 @@ spec:
description: Conditions are the individual conditions for the cluster
set.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
direct use as an array at the field path .status.conditions. For
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
observations of a foo's current state.\n\t // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
\ // other fields\n\t}"
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
@@ -279,12 +275,7 @@ spec:
- Unknown
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
---
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
useful (see .node.status.conditions), the ability to deconflict is important.
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string

View File

@@ -33,6 +33,7 @@ type CreateConfig struct {
agentEnvs cli.StringSlice
persistenceType string
storageClassName string
storageRequestSize string
version string
mode string
kubeconfigServerHost string
@@ -41,14 +42,17 @@ type CreateConfig struct {
func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
createConfig := &CreateConfig{}
createFlags := NewCreateFlags(createConfig)
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags, newCreateFlags(createConfig)...)
return &cli.Command{
Name: "create",
Usage: "Create new cluster",
UsageText: "k3kcli cluster create [command options] NAME",
Action: createAction(appCtx, createConfig),
Flags: WithCommonFlags(appCtx, createFlags...),
Flags: flags,
HideHelpCommand: true,
}
}
@@ -170,8 +174,9 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
Type: v1alpha1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
},
}

View File

@@ -5,9 +5,10 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/api/resource"
)
func NewCreateFlags(config *CreateConfig) []cli.Flag {
func newCreateFlags(config *CreateConfig) []cli.Flag {
return []cli.Flag{
&cli.IntFlag{
Name: "servers",
@@ -60,6 +61,17 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
Usage: "storage class name for dynamic persistence type",
Destination: &config.storageClassName,
},
&cli.StringFlag{
Name: "storage-request-size",
Usage: "storage size for dynamic persistence type",
Destination: &config.storageRequestSize,
Action: func(ctx *cli.Context, value string) error {
if _, err := resource.ParseQuantity(value); err != nil {
return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
}
return nil
},
},
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",

View File

@@ -20,16 +20,22 @@ import (
var keepData bool
func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
UsageText: "k3kcli cluster delete [command options] NAME",
Action: delete(appCtx),
Flags: WithCommonFlags(appCtx, &cli.BoolFlag{
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags,
&cli.BoolFlag{
Name: "keep-data",
Usage: "keeps persistence volumes created for the cluster after deletion",
Destination: &keepData,
}),
},
)
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
UsageText: "k3kcli cluster delete [command options] NAME",
Action: delete(appCtx),
Flags: flags,
HideHelpCommand: true,
}
}

View File

@@ -12,12 +12,15 @@ import (
)
func NewClusterListCmd(appCtx *AppContext) *cli.Command {
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
return &cli.Command{
Name: "list",
Usage: "List all the existing cluster",
UsageText: "k3kcli cluster list [command options]",
Action: list(appCtx),
Flags: WithCommonFlags(appCtx),
Flags: flags,
HideHelpCommand: true,
}
}

View File

@@ -23,14 +23,17 @@ import (
)
var (
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
kubeconfigServerHost string
generateKubeconfigFlags = []cli.Flag{
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
kubeconfigServerHost string
)
func newGenerateKubeconfigFlags(appCtx *AppContext) []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name: "name",
Usage: "cluster name",
@@ -70,7 +73,7 @@ var (
Value: "",
},
}
)
}
func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
@@ -83,12 +86,16 @@ func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
}
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags, newGenerateKubeconfigFlags(appCtx)...)
return &cli.Command{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate(appCtx),
Flags: WithCommonFlags(appCtx, generateKubeconfigFlags...),
Flags: flags,
}
}

View File

@@ -22,7 +22,8 @@ type VirtualClusterPolicyCreateConfig struct {
func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command {
config := &VirtualClusterPolicyCreateConfig{}
createFlags := []cli.Flag{
flags := CommonFlags(appCtx)
flags = append(flags,
&cli.StringFlag{
Name: "mode",
Usage: "The allowed mode type of the policy",
@@ -37,14 +38,14 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command {
}
},
},
}
)
return &cli.Command{
Name: "create",
Usage: "Create new policy",
UsageText: "k3kcli policy create [command options] NAME",
Action: policyCreateAction(appCtx, config),
Flags: WithCommonFlags(appCtx, createFlags...),
Flags: flags,
HideHelpCommand: true,
}
}

View File

@@ -2,14 +2,11 @@ package cmds
import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command {
@@ -18,7 +15,7 @@ func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command {
Usage: "Delete an existing policy",
UsageText: "k3kcli policy delete [command options] NAME",
Action: policyDeleteAction(appCtx),
Flags: WithCommonFlags(appCtx),
Flags: CommonFlags(appCtx),
HideHelpCommand: true,
}
}
@@ -33,24 +30,13 @@ func policyDeleteAction(appCtx *AppContext) cli.ActionFunc {
}
name := clx.Args().First()
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
namespace := appCtx.Namespace(name)
logrus.Infof("Deleting policy in namespace [%s]", namespace)
policy := &v1alpha1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}
policy := &v1alpha1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {
if apierrors.IsNotFound(err) {
logrus.Warnf("Policy not found in namespace [%s]", namespace)
logrus.Warnf("Policy not found")
} else {
return err
}

View File

@@ -16,7 +16,7 @@ func NewPolicyListCmd(appCtx *AppContext) *cli.Command {
Usage: "List all the existing policies",
UsageText: "k3kcli policy list [command options]",
Action: policyList(appCtx),
Flags: WithCommonFlags(appCtx),
Flags: CommonFlags(appCtx),
HideHelpCommand: true,
}
}

View File

@@ -31,7 +31,7 @@ func NewApp() *cli.App {
app := cli.NewApp()
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = WithCommonFlags(appCtx)
app.Flags = CommonFlags(appCtx)
app.Before = func(clx *cli.Context) error {
if appCtx.Debug {
@@ -94,27 +94,36 @@ func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
return kubeConfig.ClientConfig()
}
func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag {
commonFlags := []cli.Flag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &appCtx.Debug,
EnvVars: []string{"K3K_DEBUG"},
},
&cli.StringFlag{
Name: "kubeconfig",
Usage: "kubeconfig path",
Destination: &appCtx.Kubeconfig,
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
},
&cli.StringFlag{
Name: "namespace",
Usage: "namespace to create the k3k cluster in",
Aliases: []string{"n"},
Destination: &appCtx.namespace,
},
func CommonFlags(appCtx *AppContext) []cli.Flag {
return []cli.Flag{
FlagDebug(appCtx),
FlagKubeconfig(appCtx),
}
}
func FlagDebug(appCtx *AppContext) *cli.BoolFlag {
return &cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &appCtx.Debug,
EnvVars: []string{"K3K_DEBUG"},
}
}
func FlagKubeconfig(appCtx *AppContext) *cli.StringFlag {
return &cli.StringFlag{
Name: "kubeconfig",
Usage: "kubeconfig path",
Destination: &appCtx.Kubeconfig,
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
}
}
func FlagNamespace(appCtx *AppContext) *cli.StringFlag {
return &cli.StringFlag{
Name: "namespace",
Usage: "namespace of the k3k cluster",
Aliases: []string{"n"},
Destination: &appCtx.namespace,
}
return append(commonFlags, flags...)
}

View File

@@ -9,7 +9,6 @@ k3kcli
```
[--debug]
[--kubeconfig]=[value]
[--namespace|-n]=[value]
```
**Usage**:
@@ -24,8 +23,6 @@ k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace to create the k3k cluster in
# COMMANDS
@@ -55,7 +52,7 @@ Create new cluster
**--mode**="": k3k mode type (shared, virtual) (default: "shared")
**--namespace, -n**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace of the k3k cluster
**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")
@@ -71,6 +68,8 @@ Create new cluster
**--storage-class-name**="": storage class name for dynamic persistence type
**--storage-request-size**="": storage size for dynamic persistence type
**--token**="": token of the cluster
**--version**="": k3s version
@@ -87,7 +86,7 @@ Delete an existing cluster
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace of the k3k cluster
### list
@@ -99,7 +98,7 @@ List all the existing cluster
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace of the k3k cluster
## policy
@@ -117,8 +116,6 @@ Create new policy
**--mode**="": The allowed mode type of the policy (default: "shared")
**--namespace, -n**="": namespace to create the k3k cluster in
### delete
Delete an existing policy
@@ -129,8 +126,6 @@ Delete an existing policy
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace to create the k3k cluster in
### list
List all the existing policies
@@ -141,8 +136,6 @@ List all the existing policies
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace to create the k3k cluster in
## kubeconfig
Manage kubeconfig for clusters
@@ -167,6 +160,6 @@ Generate kubeconfig for clusters
**--name**="": cluster name
**--namespace, -n**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace of the k3k cluster
**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig

View File

@@ -0,0 +1,302 @@
# How to: Create a Virtual Cluster
This guide walks through the various ways to create and manage virtual clusters in K3K. We'll cover common use cases using both the **Custom Resource Definitions (CRDs)** and the **K3K CLI**, so you can choose the method that fits your workflow.
> 📘 For full reference:
> - [CRD Reference Documentation](../crds/crd-docs.md)
> - [CLI Reference Documentation](../cli/cli-docs.md)
> - [Full example](../advanced-usage.md)
> [!NOTE]
> 🚧 Some features are currently only available via the CRD interface. CLI support may be added in the future.
---
## Use Case: Create and Expose a Basic Virtual Cluster
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ingress
spec:
tlsSANs:
- my-cluster.example.com
expose:
ingress:
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
```
This will create a virtual cluster in `shared` mode and expose it via an ingress with the specified hostname.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster with Persistent Storage (**Default**)
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-persistent
spec:
persistence:
type: dynamic
storageClassName: local-path
storageRequestSize: 30Gi
```
This ensures that the virtual cluster stores its state persistently with a 30Gi volume.
If `storageClassName` is not set, the cluster falls back to the default StorageClass of the host cluster.
If `storageRequestSize` is not set, a 1Gi volume is requested by default.
### CLI Method
```sh
k3kcli cluster create \
--persistence-type dynamic \
--storage-class-name local-path \
--storage-request-size 30Gi \
k3kcluster-persistent
```
> [!NOTE]
> The `--storage-request-size` flag accepts any valid Kubernetes resource quantity (e.g. `10Gi`).
---
## Use Case: Create a Highly Available Virtual Cluster in `shared` mode
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ha
spec:
servers: 3
```
This will create a virtual cluster with 3 servers and a default 1Gi volume for persistence.
### CLI Method
```sh
k3kcli cluster create \
--servers 3 \
k3kcluster-ha
```
---
## Use Case: Create a Highly Available Virtual Cluster in `virtual` mode
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-virtual
spec:
mode: virtual
servers: 3
agents: 3
```
This will create a virtual cluster with 3 servers and 3 agents and a default 1Gi volume for persistence.
> [!NOTE]
> Agents only exist for `virtual` mode.
### CLI Method
```sh
k3kcli cluster create \
--agents 3 \
--servers 3 \
--mode virtual \
k3kcluster-virtual
```
---
## Use Case: Create an Ephemeral Virtual Cluster
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
spec:
persistence:
type: ephemeral
```
This will create an ephemeral virtual cluster with no persistence and a single server.
### CLI Method
```sh
k3kcli cluster create \
--persistence-type ephemeral \
k3kcluster-ephemeral
```
---
## Use Case: Create a Virtual Cluster with a Custom Kubernetes Version
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
spec:
version: "v1.33.1-k3s1"
```
This sets the virtual cluster's Kubernetes version explicitly.
> [!NOTE]
> Only [K3s](https://k3s.io) distributions are supported. You can find compatible versions on the K3s GitHub [release page](https://github.com/k3s-io/k3s/releases).
### CLI Method
```sh
k3kcli cluster create \
--version v1.33.1-k3s1 \
k3kcluster-custom-k8s
```
---
## Use Case: Create a Virtual Cluster with Custom Resource Limits
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-resourced
spec:
mode: virtual
serverLimit:
cpu: "1"
memory: "2Gi"
workerLimit:
cpu: "1"
memory: "2Gi"
```
This configures the CPU and memory limit for the virtual cluster.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster on specific host nodes
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-node-placed
spec:
nodeSelector:
disktype: ssd
```
This places the virtual cluster on nodes with the label `disktype: ssd`.
> [!NOTE]
> In `shared` mode, workloads are also scheduled on the selected nodes.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster with a Rancher Host Cluster Kubeconfig
When using a `kubeconfig` generated with Rancher, you need to specify the desired host for the virtual cluster `kubeconfig` via the CLI (see the `--kubeconfig-server` flag below).
By default, `k3kcli` uses the current host `kubeconfig` to determine the target cluster.
### CRD Method
*Not applicable*
### CLI Method
```sh
k3kcli cluster create \
--kubeconfig-server https://abc.xyz \
k3kcluster-host-rancher
```
---
## Use Case: Create a Virtual Cluster Behind an HTTP Proxy
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-http-proxy
spec:
serverEnvs:
- name: HTTP_PROXY
value: "http://abc.xyz"
agentEnvs:
- name: HTTP_PROXY
value: "http://abc.xyz"
```
This configures an HTTP proxy for both servers and agents in the virtual cluster.
> [!NOTE]
> This can be leveraged to pass **any custom environment variables** to the servers and agents — not just proxy settings.
### CLI Method
```sh
k3kcli cluster create \
--server-envs HTTP_PROXY=http://abc.xyz \
--agent-envs HTTP_PROXY=http://abc.xyz \
k3kcluster-http-proxy
```
---
## How to: Connect to a Virtual Cluster
Once the virtual cluster is running, you can connect to it using the CLI:
### CLI Method
```sh
k3kcli kubeconfig generate --namespace k3k-mycluster --name mycluster
export KUBECONFIG=$PWD/mycluster-kubeconfig.yaml
kubectl get nodes
```
This command generates a `kubeconfig` file, which you can use to access your virtual cluster via `kubectl`.

73
go.mod
View File

@@ -3,43 +3,46 @@ module github.com/rancher/k3k
go 1.24.2
replace (
github.com/google/cel-go => github.com/google/cel-go v0.17.7
github.com/google/cel-go => github.com/google/cel-go v0.20.1
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
github.com/prometheus/common => github.com/prometheus/common v0.47.0
github.com/prometheus/common => github.com/prometheus/common v0.64.0
golang.org/x/term => golang.org/x/term v0.15.0
)
require (
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/prometheus/client_model v0.6.1
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.0
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
go.etcd.io/etcd/api/v3 v3.5.16
go.etcd.io/etcd/client/v3 v3.5.16
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.29.11
k8s.io/apiextensions-apiserver v0.29.11
k8s.io/apimachinery v0.29.11
k8s.io/apiserver v0.29.11
k8s.io/cli-runtime v0.29.11
k8s.io/client-go v0.29.11
k8s.io/component-base v0.29.11
k8s.io/component-helpers v0.29.11
k8s.io/kubectl v0.29.11
k8s.io/api v0.31.4
k8s.io/apiextensions-apiserver v0.31.4
k8s.io/apimachinery v0.31.4
k8s.io/apiserver v0.31.4
k8s.io/cli-runtime v0.31.4
k8s.io/client-go v0.31.4
k8s.io/component-base v0.31.4
k8s.io/component-helpers v0.31.4
k8s.io/kubectl v0.31.4
k8s.io/kubelet v0.31.4
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.17.5
sigs.k8s.io/controller-runtime v0.19.4
)
require github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
@@ -52,7 +55,6 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -84,6 +86,7 @@ require (
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
@@ -100,12 +103,11 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
@@ -119,7 +121,7 @@ require (
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
@@ -154,8 +156,9 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -168,6 +171,7 @@ require (
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -176,37 +180,38 @@ require (
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.29.11 // indirect
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect

1544
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -5,14 +5,12 @@ import (
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -24,44 +22,44 @@ const (
)
type PVCReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PVCReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(pvcController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(pvcController).
For(&v1.PersistentVolumeClaim{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
@@ -72,7 +70,6 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
return reconcile.Result{}, err
}
// handling persistent volume sync
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -13,7 +12,6 @@ import (
"k8s.io/component-helpers/storage/volume"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
@@ -24,44 +22,44 @@ const (
)
type PodReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPodPVCController adds pod controller to k3k-kubelet
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PodReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(podController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(podController).
For(&v1.Pod{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPod v1.Pod
@@ -72,7 +70,6 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
return reconcile.Result{}, err
}
// handling pod
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -95,6 +92,7 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
// and then created on the host, the PV is not synced to the host cluster.
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
ctx = ctrl.LoggerInto(ctx, log)
var pvc v1.PersistentVolumeClaim

View File

@@ -5,15 +5,12 @@ import (
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -21,49 +18,47 @@ import (
const (
serviceSyncerController = "service-syncer-controller"
maxConcurrentReconciles = 1
serviceFinalizerName = "service.k3k.io/finalizer"
)
type ServiceReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := ServiceReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(serviceSyncerController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(serviceSyncerController).
For(&v1.Service{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
return reconcile.Result{}, nil
@@ -71,27 +66,26 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
hostService v1.Service
cluster v1alpha1.Cluster
)
// getting the cluster for setting the controller reference
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := s.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtService.DeletionTimestamp.IsZero() {
// deleting the synced service if exists
if err := s.hostClient.Delete(ctx, syncedService); err != nil {
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -99,7 +93,7 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
@@ -111,15 +105,17 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
// create or update the service on host
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil {
var hostService v1.Service
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
@@ -127,7 +123,7 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("updating service on the host cluster")
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
}
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {

View File

@@ -10,6 +10,7 @@ import (
"net/http"
"time"
"github.com/go-logr/zapr"
certutil "github.com/rancher/dynamiclistener/cert"
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
@@ -93,6 +94,8 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
Scheme: baseScheme,
LeaderElection: true,
@@ -144,19 +147,19 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
}

View File

@@ -10,8 +10,8 @@ package collectors
import (
"time"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// defining metrics

View File

@@ -34,7 +34,6 @@ func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servic
// configure versions
node.Status.NodeInfo.KubeletVersion = version
node.Status.NodeInfo.KubeProxyVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)

View File

@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/google/go-cmp/cmp"
dto "github.com/prometheus/client_model/go"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
@@ -20,7 +21,6 @@ import (
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
@@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/utils/ptr"
"errors"
@@ -209,7 +210,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
}
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary, error) {
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
nodeList := &v1.NodeList{}
@@ -219,8 +220,8 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
// fetch the stats from all the nodes
var (
nodeStats statsv1alpha1.NodeStats
allPodsStats []statsv1alpha1.PodStats
nodeStats stats.NodeStats
allPodsStats []stats.PodStats
)
for _, n := range nodeList.Items {
@@ -238,7 +239,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
)
}
stats := &statsv1alpha1.Summary{}
stats := &stats.Summary{}
if err := json.Unmarshal(res, stats); err != nil {
return nil, err
}
@@ -262,9 +263,9 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
podsNameMap[hostPodName] = pod
}
filteredStats := &statsv1alpha1.Summary{
filteredStats := &stats.Summary{
Node: nodeStats,
Pods: make([]statsv1alpha1.PodStats, 0),
Pods: make([]stats.PodStats, 0),
}
for _, podStat := range allPodsStats {
@@ -275,7 +276,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
// rewrite the PodReference to match the data of the virtual cluster
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
podStat.PodRef = statsv1alpha1.PodReference{
podStat.PodRef = stats.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
@@ -582,6 +583,31 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
}
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
}
// Handle ephemeral containers
if !cmp.Equal(currentHostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
p.logger.Info("Updating ephemeral containers")
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
return err
}
return nil
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -597,17 +623,6 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
}
// Update Pod in the host cluster
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
}
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -858,6 +873,11 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
}
// handle ephemeral containers as well
for i := range pod.Spec.EphemeralContainers {
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
}
}
// overrideEnvVars will override the orig environment variables if found in the updated list
@@ -912,44 +932,16 @@ func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
// to assign env fieldpaths to pods, it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
for _, container := range pod.Spec.EphemeralContainers {
addFieldPathAnnotationToEnv(container.Env)
}
// override metadata.name and metadata.namespace with pod annotations
for i, container := range pod.Spec.InitContainers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.InitContainers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.MetadataNamespaceField)
pod.Spec.InitContainers[i].Env[j] = envVar
}
}
for _, container := range pod.Spec.InitContainers {
addFieldPathAnnotationToEnv(container.Env)
}
for i, container := range pod.Spec.Containers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
}
for _, container := range pod.Spec.Containers {
addFieldPathAnnotationToEnv(container.Env)
}
for name, value := range pod.Annotations {
@@ -974,3 +966,22 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return nil
}
func addFieldPathAnnotationToEnv(envVars []v1.EnvVar) {
for j, envVar := range envVars {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
envVars[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
envVars[j] = envVar
}
}
}

View File

@@ -110,6 +110,7 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for i, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
break
}
}
// init containers
@@ -117,6 +118,17 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
break
}
}
}
// ephemeral containers
for i, container := range pod.Spec.EphemeralContainers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.EphemeralContainers[i].VolumeMounts = append(pod.Spec.EphemeralContainers[i].VolumeMounts[:j], pod.Spec.EphemeralContainers[i].VolumeMounts[j+1:]...)
break
}
}
}
@@ -125,6 +137,7 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...)
break
}
}
}

View File

@@ -2,7 +2,7 @@ package translate
import (
"encoding/hex"
"fmt"
"strings"
"github.com/rancher/k3k/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -99,14 +99,26 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
var names []string
// some resources are not namespaced (i.e. priorityclasses)
/// for these resources we skip the namespace to avoid having a name like: prioritclass--cluster-123
if namespace == "" {
names = []string{name, t.ClusterName}
} else {
names = []string{name, namespace, t.ClusterName}
}
// we need to come up with a name which is:
// - somewhat connectable to the original resource
// - a valid k8s name
// - idempotently calculatable
// - unique for this combination of name/namespace/cluster
namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName)
namePrefix := strings.Join(names, "-")
// use + as a separator since it can't be in an object name
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
nameKey := strings.Join(names, "+")
// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
nameSuffix := hex.EncodeToString([]byte(nameKey))

View File

@@ -11,6 +11,7 @@ import (
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.mode",name=Mode,type=string
// +kubebuilder:printcolumn:JSONPath=".status.policyName",name=Policy,type=string
// Cluster defines a virtual Kubernetes cluster managed by k3k.
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
@@ -316,6 +317,11 @@ type ClusterStatus struct {
//
// +optional
Persistence PersistenceConfig `json:"persistence,omitempty"`
// PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
//
// +optional
PolicyName string `json:"policyName,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@@ -336,7 +336,7 @@ func (s *SharedAgent) role(ctx context.Context) error {
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"},
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{

View File

@@ -15,6 +15,7 @@ import (
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/policy"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
@@ -24,11 +25,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimecontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
@@ -84,14 +87,47 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
WithOptions(ctrlruntimecontroller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
Complete(&reconciler)
}
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
return handler.Funcs{
// We don't need to update for create or delete events
CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) {},
DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) {},
// When a Namespace is updated, if it has the "policy.k3k.io/policy-name" label
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldNs, okOld := e.ObjectOld.(*v1.Namespace)
newNs, okNew := e.ObjectNew.(*v1.Namespace)
if !okOld || !okNew {
return
}
oldVCPName := oldNs.Labels[policy.PolicyNameLabelKey]
newVCPName := newNs.Labels[policy.PolicyNameLabelKey]
// If policy hasn't changed we can skip the reconciliation
if oldVCPName == newVCPName {
return
}
// Enqueue all the Cluster in the namespace
var clusterList v1alpha1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
for _, cluster := range clusterList.Items {
q.Add(reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&cluster)})
}
},
}
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
@@ -214,6 +250,14 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
}
}
var ns v1.Namespace
if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
return err
}
policyName := ns.Labels[policy.PolicyNameLabelKey]
cluster.Status.PolicyName = policyName
if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
return err
}
@@ -320,6 +364,20 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
networkPolicyName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name)
// network policies are managed by the Policy -> delete the one created as a standalone cluster
if cluster.Status.PolicyName != "" {
netpol := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: networkPolicyName,
Namespace: cluster.Namespace,
},
}
return ctrlruntimeclient.IgnoreNotFound(c.Client.Delete(ctx, netpol))
}
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),

View File

@@ -3,6 +3,7 @@ package controller
import (
"crypto/sha256"
"encoding/hex"
"slices"
"strings"
"time"
@@ -45,7 +46,12 @@ func SafeConcatNameWithPrefix(name ...string) string {
// SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters
// by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string.
// Empty strings in the array will be ignored.
func SafeConcatName(name ...string) string {
name = slices.DeleteFunc(name, func(s string) bool {
return s == ""
})
fullPath := strings.Join(name, "-")
if len(fullPath) < 64 {
return fullPath

View File

@@ -50,9 +50,10 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
}
for _, ns := range namespaces.Items {
deleteOpts := []client.DeleteAllOfOption{
client.InNamespace(ns.Name),
client.MatchingLabels{ManagedByLabelKey: VirtualPolicyControllerName},
selector := labels.NewSelector()
if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
selector = selector.Add(*req)
}
// if the namespace is bound to a policy -> cleanup resources of other policies
@@ -63,11 +64,15 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
if err != nil {
log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey])
} else {
sel := labels.NewSelector().Add(*requirement)
deleteOpts = append(deleteOpts, client.MatchingLabelsSelector{Selector: sel})
selector = selector.Add(*requirement)
}
}
deleteOpts := []client.DeleteAllOfOption{
client.InNamespace(ns.Name),
client.MatchingLabelsSelector{Selector: selector},
}
if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil {
return err
}

View File

@@ -57,7 +57,7 @@ func Add(mgr manager.Manager, clusterCIDR string) error {
func namespaceEventHandler() handler.Funcs {
return handler.Funcs{
// When a Namespace is created, if it has the "policy.k3k.io/policy-name" label
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
ns, ok := e.Object.(*v1.Namespace)
if !ok {
return
@@ -66,10 +66,9 @@ func namespaceEventHandler() handler.Funcs {
if ns.Labels[PolicyNameLabelKey] != "" {
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
}
},
// When a Namespace is updated, if it has the "policy.k3k.io/policy-name" label
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldNs, okOld := e.ObjectOld.(*v1.Namespace)
newNs, okNew := e.ObjectNew.(*v1.Namespace)
@@ -108,7 +107,7 @@ func namespaceEventHandler() handler.Funcs {
},
// When a namespace is deleted all the resources in the namespace are deleted
// but we trigger the reconciliation to eventually perform some cluster-wide cleanup if necessary
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
ns, ok := e.Object.(*v1.Namespace)
if !ok {
return
@@ -125,7 +124,7 @@ func namespaceEventHandler() handler.Funcs {
// This happens only if the ClusterCIDR is NOT specified, to handle the PodCIDRs in the NetworkPolicies.
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// enqueue all the available VirtualClusterPolicies
enqueueAllVCPs := func(ctx context.Context, q workqueue.RateLimitingInterface) {
enqueueAllVCPs := func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
vcpList := &v1alpha1.VirtualClusterPolicyList{}
if err := r.Client.List(ctx, vcpList); err != nil {
return
@@ -137,14 +136,14 @@ func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
}
return handler.Funcs{
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
if r.ClusterCIDR != "" {
return
}
enqueueAllVCPs(ctx, q)
},
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
if r.ClusterCIDR != "" {
return
}
@@ -170,7 +169,7 @@ func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
enqueueAllVCPs(ctx, q)
}
},
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
if r.ClusterCIDR != "" {
return
}
@@ -189,7 +188,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
return handler.Funcs{
// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
cluster, ok := e.Object.(*v1alpha1.Cluster)
if !ok {
return
@@ -206,7 +205,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
},
// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
// and if some of its spec influenced by the policy changed
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
@@ -238,7 +237,8 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
}
},
// When a Cluster is deleted -> nothing to do
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {},
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
},
}
}

View File

@@ -3,7 +3,7 @@
set -eou pipefail
CONTROLLER_TOOLS_VERSION=v0.14.0
CONTROLLER_TOOLS_VERSION=v0.16.0
# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for struct that use floats