Compare commits

...

9 Commits

Author SHA1 Message Date
Parth Yadav
764433bd04 chore(adopters): add coredge.io as an adopter (#962)
This patch updates ADOPTERS.md with Coredge.io addition in the list.

Signed-off-by: Parth Yadav <parth@coredge.io>
2025-09-11 21:42:41 +02:00
dependabot[bot]
0e54d84ebb feat(deps): bump github.com/spf13/viper from 1.20.1 to 1.21.0 (#952)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.20.1 to 1.21.0.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.20.1...v1.21.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-version: 1.21.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-11 15:26:52 +02:00
dependabot[bot]
b0faf7d31e feat(deps): bump sigs.k8s.io/controller-runtime from 0.22.0 to 0.22.1 (#953)
* feat(deps): bump sigs.k8s.io/controller-runtime from 0.22.0 to 0.22.1

Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.22.0 to 0.22.1.
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.22.0...v0.22.1)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-version: 0.22.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore(golangci-lint): apply is no more deprecated

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Dario Tranchitella <dario@tranchitella.eu>
2025-09-11 15:25:42 +02:00
dependabot[bot]
47cc705c98 feat(deps): bump k8s.io/kubernetes in the k8s group (#960)
Bumps the k8s group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes).


Updates `k8s.io/kubernetes` from 1.34.0 to 1.34.1
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.34.0...v1.34.1)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-version: 1.34.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-11 15:12:50 +02:00
Dario Tranchitella
17869a4e0f fix(controller-manager): supporting extra args override (#959)
* fix(controller-manager): supporting extra args override

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

* chore: removing deprecated intstr.FromInt usage

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

---------

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>
2025-09-10 14:23:32 +02:00
Dario Tranchitella
2a7749839e feat!: inflecting version for konnectivity components from tcp (#934)
* feat(api)!: inflecting version for konnectivity components from tcp

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

* feat: inflecting version for konnectivity components from tcp

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

* docs(konnectivity): warning about missing container artefacts

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>

---------

Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>
2025-09-10 12:19:33 +02:00
Adriano Pezzuto
aabbdd96a3 fix(docs): improve capi cluster class (#957)
* fix(docs): improve capi cluster class
2025-09-09 15:53:23 +02:00
Pierre Gaxatte
5d6f512df1 fix(certificates): use a stable format for the rotate annotation value (#955) 2025-09-09 12:27:11 +02:00
Dario Tranchitella
1b4bd884dc docs(capi): updating to latest changes (#956)
Signed-off-by: Dario Tranchitella <dario@tranchitella.eu>
2025-09-09 11:34:30 +02:00
18 changed files with 1837 additions and 227 deletions

View File

@@ -10,6 +10,7 @@ Feel free to open a Pull-Request to get yours listed.
| Vendor | Aknostic | 2023 | [link](https://aknostic.com) | Aknostic is a cloud-native consultancy company using Kamaji to build a Kubernetes based PaaS. |
| R&D | Aruba | 2024 | [link](https://www.aruba.it/home.aspx) | Aruba Cloud is an Italian Cloud Service Provider evaluating Kamaji to build and offer [Managed Kubernetes Service](https://my.arubacloud.com). |
| Vendor | CBWS | 2025 | [link](https://cbws.nl) | CBWS is a European Cloud Provider using Kamaji to build and offer their [Managed Kubernetes Service](https://cbws.nl/cloud/kubernetes/). |
| Vendor | Coredge | 2025 | [link](https://coredge.io/) | Coredge uses Kamaji in its K8saaS offering to save infrastructure costs in its Sovereign Cloud & AI Infrastructure Platform for end-user organisations. |
| Vendor | DCloud | 2024 | [link](https://dcloud.co.id) | DCloud is an Indonesian Cloud Provider using Kamaji to build and offer [Managed Kubernetes Service](https://dcloud.co.id/dkubes.html). |
| Vendor | Dinova | 2025 | [link](https://dinova.one/) | Dinova is an Italian cloud services provider that integrates Kamaji in its datacenters to offer fully managed Kubernetes clusters. |
| End-user | KINX | 2024 | [link](https://kinx.net/?lang=en) | KINX is an Internet infrastructure service provider and will use kamaji for its new [Managed Kubernetes Service](https://kinx.net/service/cloud/kubernetes/intro/?lang=en). |

View File

@@ -226,7 +226,9 @@ type KonnectivityServerSpec struct {
// The port which Konnectivity server is listening to.
Port int32 `json:"port"`
// Container image version of the Konnectivity server.
//+kubebuilder:default=v0.28.6
// If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
//
// WARNING: for the latest cut-off releases, the container image might not be available.
Version string `json:"version,omitempty"`
// Container image used by the Konnectivity server.
//+kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
@@ -250,7 +252,9 @@ type KonnectivityAgentSpec struct {
//+kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-agent
Image string `json:"image,omitempty"`
// Version for Konnectivity agent.
//+kubebuilder:default=v0.28.6
// If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
//
// WARNING: for the latest cut-off releases, the container image might not be available.
Version string `json:"version,omitempty"`
// Tolerations for the deployed agent.
// Can be customized to start the konnectivity-agent even if the nodes are not ready or tainted.
@@ -275,9 +279,9 @@ type KonnectivityAgentSpec struct {
// KonnectivitySpec defines the spec for Konnectivity.
type KonnectivitySpec struct {
//+kubebuilder:default={version:"v0.28.6",image:"registry.k8s.io/kas-network-proxy/proxy-server",port:8132}
//+kubebuilder:default={image:"registry.k8s.io/kas-network-proxy/proxy-server",port:8132}
KonnectivityServerSpec KonnectivityServerSpec `json:"server,omitempty"`
//+kubebuilder:default={version:"v0.28.6",image:"registry.k8s.io/kas-network-proxy/proxy-agent",mode:"DaemonSet"}
//+kubebuilder:default={image:"registry.k8s.io/kas-network-proxy/proxy-agent",mode:"DaemonSet"}
KonnectivityAgentSpec KonnectivityAgentSpec `json:"agent,omitempty"`
}

View File

@@ -89,7 +89,6 @@ versions:
default:
image: registry.k8s.io/kas-network-proxy/proxy-agent
mode: DaemonSet
version: v0.28.6
properties:
extraArgs:
description: |-
@@ -170,8 +169,11 @@ versions:
type: object
type: array
version:
default: v0.28.6
description: Version for Konnectivity agent.
description: |-
Version for Konnectivity agent.
If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
WARNING: for the latest cut-off releases, the container image might not be available.
type: string
type: object
x-kubernetes-validations:
@@ -181,7 +183,6 @@ versions:
default:
image: registry.k8s.io/kas-network-proxy/proxy-server
port: 8132
version: v0.28.6
properties:
extraArgs:
description: |-
@@ -260,8 +261,11 @@ versions:
type: object
type: object
version:
default: v0.28.6
description: Container image version of the Konnectivity server.
description: |-
Container image version of the Konnectivity server.
If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
WARNING: for the latest cut-off releases, the container image might not be available.
type: string
required:
- port

View File

@@ -97,7 +97,6 @@ spec:
default:
image: registry.k8s.io/kas-network-proxy/proxy-agent
mode: DaemonSet
version: v0.28.6
properties:
extraArgs:
description: |-
@@ -178,8 +177,11 @@ spec:
type: object
type: array
version:
default: v0.28.6
description: Version for Konnectivity agent.
description: |-
Version for Konnectivity agent.
If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
WARNING: for the latest cut-off releases, the container image might not be available.
type: string
type: object
x-kubernetes-validations:
@@ -189,7 +191,6 @@ spec:
default:
image: registry.k8s.io/kas-network-proxy/proxy-server
port: 8132
version: v0.28.6
properties:
extraArgs:
description: |-
@@ -268,8 +269,11 @@ spec:
type: object
type: object
version:
default: v0.28.6
description: Container image version of the Konnectivity server.
description: |-
Container image version of the Konnectivity server.
If left empty, Kamaji will automatically inflect the version from the deployed Tenant Control Plane.
WARNING: for the latest cut-off releases, the container image might not be available.
type: string
required:
- port

View File

@@ -1,104 +1,642 @@
# Cluster Class
# Cluster Class with Kamaji
Kamaji supports **ClusterClass**, a simple way to create many clusters of a similar shape. This is useful for creating many clusters with the same configuration, such as a development cluster, a staging cluster, and a production cluster.
`ClusterClass` is a Cluster API feature that enables template-based cluster creation. When combined with Kamaji's hosted control plane architecture, `ClusterClass` provides a powerful pattern for standardizing Kubernetes cluster deployments across multiple infrastructure providers while maintaining consistent control plane configurations.
!!! warning "Experimental Feature"
ClusterClass is an experimental feature of Cluster API. As with any experimental features it should be used with caution as it may be unreliable. All experimental features are not subject to any compatibility or deprecation policy and are not yet recommended for production use.
ClusterClass is still an experimental feature of Cluster API. As with any experimental feature, it should be used with caution. Read more about ClusterClass in the [Cluster API documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/).
You can read more about ClusterClass in the [Cluster API documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/).
## Understanding Cluster Class
## Enabling ClusterClass
`ClusterClass` reduces configuration boilerplate by defining reusable cluster templates. Instead of creating individual resources for each cluster, you define a `ClusterClass` once and create multiple clusters from it with minimal configuration.
To enable ClusterClass, you need to set `CLUSTER_TOPOLOGY` before running `clusterctl init`. This will enable the Cluster API feature gate for ClusterClass.
With Kamaji, this pattern becomes even more powerful:
- **Shared Control Plane Templates**: The same KamajiControlPlaneTemplate works across all infrastructure providers
- **Infrastructure Flexibility**: Deploy worker nodes on vSphere, AWS, Azure, or any supported provider while maintaining consistent control planes
- **Simplified Management**: Hosted control planes reduce the complexity of `ClusterClass` templates
## Enabling Cluster Class
To use `ClusterClass` with Kamaji, you need to enable the cluster topology feature gate before initializing the management cluster:
```bash
export CLUSTER_TOPOLOGY=true
clusterctl init --infrastructure vsphere --control-plane kamaji
clusterctl init --control-plane kamaji --infrastructure vsphere
```
## Creating a ClusterClass
This will install:
- Cluster API core components with `ClusterClass` support
- Kamaji Control Plane Provider
- Your chosen infrastructure provider (vSphere in this example)
To create a ClusterClass, you need to create a `ClusterClass` custom resource. Here is an example of a `ClusterClass` that will create a cluster running control plane on the Kamaji Management Cluster and worker nodes on vSphere:
Verify the installation:
```bash
kubectl get deployments -A | grep -E "capi|kamaji"
```
## Template Architecture with Kamaji
A `ClusterClass` with Kamaji consists of four main components:
1. Control Plane Template (KamajiControlPlaneTemplate): Defines the hosted control plane configuration that remains consistent across infrastructure providers.
2. Infrastructure Template (VSphereClusterTemplate): Provider-specific infrastructure configuration for the cluster.
3. Bootstrap Template (KubeadmConfigTemplate): Node initialization configuration that works across providers.
4. Machine Template (VSphereMachineTemplate): Provider-specific machine configuration for worker nodes.
Here's how these components relate in a `ClusterClass`:
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
name: kamaji-clusterclass
name: kamaji-vsphere-class
spec:
controlPlane:
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
name: kamaji-clusterclass-kamaji-control-plane-template
# Infrastructure provider template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereClusterTemplate
name: kamaji-clusterclass-vsphere-cluster-template
name: vsphere-cluster-template
# Kamaji control plane template - reusable across providers
controlPlane:
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
name: kamaji-control-plane-template
# Worker configuration
workers:
machineDeployments:
- class: kamaji-clusterclass
- class: default-worker
template:
bootstrap:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: kamaji-clusterclass-kubeadm-config-template
name: worker-bootstrap-template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
name: kamaji-clusterclass-vsphere-machine-template
# other resources omitted for brevity ...
name: vsphere-worker-template
```
The template file [`capi-kamaji-vsphere-class-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-class-template.yaml) provides a full example of a ClusterClass for vSphere. You can generate a ClusterClass manifest using `clusterctl`.
The key advantage: the KamajiControlPlaneTemplate and KubeadmConfigTemplate can be shared across different infrastructure providers, while only the infrastructure-specific templates need to change.
Before you need to list all the variables in the template file:
## Creating a Cluster Class
```bash
cat capi-kamaji-vsphere-class-template.yaml | clusterctl generate yaml --list-variables
Let's create a `ClusterClass` for vSphere with Kamaji. First, define the shared templates:
### KamajiControlPlaneTemplate
This template defines the hosted control plane configuration:
```yaml
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
metadata:
name: kamaji-controlplane
namespace: capi-templates-vsphere
spec:
template:
spec:
dataStoreName: "default" # Default datastore for etcd
network:
serviceType: LoadBalancer
serviceAddress: ""
certSANs: []
addons:
coreDNS: {}
kubeProxy: {}
konnectivity: {}
apiServer:
extraArgs: []
resources:
requests: {}
controllerManager:
extraArgs: []
resources:
requests: {}
scheduler:
extraArgs: []
resources:
requests: {}
kubelet:
cgroupfs: systemd
preferredAddressTypes:
- InternalIP
registry: "registry.k8s.io"
```
Fill them with the desired values and generate the manifest:
### KubeadmConfigTemplate
```bash
clusterctl generate yaml \
--from capi-kamaji-vsphere-class-template.yaml \
> capi-kamaji-vsphere-class.yaml
This bootstrap template configures worker nodes:
```yaml
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: worker-bootstrap-template
spec:
template:
spec:
# Configuration for kubeadm join
joinConfiguration:
discovery: {}
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: '{{ local_hostname }}'
kubeletExtraArgs:
cloud-provider: external
node-ip: "{{ ds.meta_data.local_ipv4 }}"
# Commands to run before kubeadm join
preKubeadmCommands:
- hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
- echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts
# Commands to run after kubeadm join
postKubeadmCommands: []
# Users to create on worker nodes
users: []
```
Apply the generated manifest to create the ClusterClass:
### VSphereClusterTemplate
```bash
kubectl apply -f capi-kamaji-vsphere-class.yaml
Infrastructure-specific template for vSphere:
```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereClusterTemplate
metadata:
name: vsphere
namespace: capi-templates-vsphere
spec:
template:
spec:
server: "vcenter.sample.com" # vCenter server address
thumbprint: "" # vCenter certificate thumbprint
identityRef:
kind: VSphereClusterIdentity
name: "vsphere-cluster-identity"
failureDomainSelector: {}
clusterModules: []
```
## Creating a Cluster from a ClusterClass
### VSphereMachineTemplate
Once a ClusterClass is created, you can create a Cluster using the ClusterClass. Here is an example of a Cluster that uses the `kamaji-clusterclass`:
Machine template for vSphere workers:
```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
metadata:
name: vsphere-vm-base
namespace: capi-templates-vsphere
spec:
template:
spec:
# Resources will be patched by ClusterClass based on variables
# numCPUs, memoryMiB, diskGiB are dynamically set
# Infrastructure defaults - will be patched by ClusterClass
server: "vcenter.sample.com"
datacenter: "datacenter"
datastore: "datastore"
resourcePool: "Resources"
folder: "vm-folder"
template: "ubuntu-2404-kube-v1.32.0"
storagePolicyName: ""
thumbprint: ""
# Network configuration (IPAM by default)
network:
devices:
- networkName: "k8s-network"
dhcp4: false
addressesFromPools:
- apiGroup: ipam.cluster.x-k8s.io
kind: InClusterIPPool
name: "{{ .builtin.cluster.name }}" # Uses cluster name
```
### Variables and Patching in Cluster Class
`ClusterClass` becomes powerful through its variable system and JSON patching capabilities. This allows the same templates to be customized for different use cases without duplicating YAML.
#### Variable System
Variables in `ClusterClass` define the parameters users can customize when creating clusters. Each variable has:
- **Schema Definition**: OpenAPI v3 schema that validates input
- **Required/Optional**: Whether the variable must be provided
- **Default Values**: Fallback values when not specified
- **Type Constraints**: Data types, ranges, and enum values
Here's how variables work in practice:
**Control Plane Variables:**
```yaml
variables:
- name: kamajiControlPlane
required: true
schema:
openAPIV3Schema:
type: object
properties:
dataStoreName:
type: string
description: "Datastore name for etcd"
default: "default"
network:
type: object
properties:
serviceType:
type: string
enum: ["ClusterIP", "NodePort", "LoadBalancer"]
default: "LoadBalancer"
serviceAddress:
type: string
description: "Pre-assigned VIP address"
```
**Machine Resource Variables:**
```yaml
- name: machineSpecs
required: true
schema:
openAPIV3Schema:
type: object
properties:
numCPUs:
type: integer
minimum: 2
maximum: 64
default: 4
memoryMiB:
type: integer
minimum: 4096
maximum: 131072
default: 8192
diskGiB:
type: integer
minimum: 40
maximum: 2048
default: 100
```
#### JSON Patching System
Patches apply variable values to the base templates at cluster creation time. This enables the same template to serve different configurations.
**Control Plane Patching:**
```yaml
patches:
- name: controlPlaneConfig
definitions:
- selector:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
matchResources:
controlPlane: true
jsonPatches:
- op: replace
path: /spec/template/spec/dataStoreName
valueFrom:
variable: kamajiControlPlane.dataStoreName
- op: replace
path: /spec/template/spec/network/serviceType
valueFrom:
variable: kamajiControlPlane.network.serviceType
```
**Machine Resource Patching:**
```yaml
- name: machineResources
definitions:
- selector:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
matchResources:
machineDeploymentClass:
names: ["default-worker"]
jsonPatches:
- op: add # Resources are not in base template
path: /spec/template/spec/numCPUs
valueFrom:
variable: machineSpecs.numCPUs
- op: add
path: /spec/template/spec/memoryMiB
valueFrom:
variable: machineSpecs.memoryMiB
```
#### Advanced Patching Patterns
**Conditional Patching:**
```yaml
- name: optionalVIP
definitions:
- selector:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
jsonPatches:
- op: replace
path: /spec/template/spec/network/serviceAddress
valueFrom:
variable: kamajiControlPlane.network.serviceAddress
# Only applies if serviceAddress is not empty
enabledIf: "{{ ne .kamajiControlPlane.network.serviceAddress \"\" }}"
```
**Infrastructure Patching:**
```yaml
- name: infrastructureConfig
definitions:
- selector:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
jsonPatches:
- op: replace
path: /spec/template/spec/datacenter
valueFrom:
variable: infrastructure.datacenter
- op: replace
path: /spec/template/spec/datastore
valueFrom:
variable: infrastructure.datastore
- op: replace
path: /spec/template/spec/template
valueFrom:
variable: infrastructure.vmTemplate
```
### Complete Cluster Class with Variables
For a comprehensive example with all variables and patches configured, see the [capi-kamaji-vsphere-class-template.yaml](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-class-template.yaml) template.
## Creating a Cluster from Cluster Class
With the `ClusterClass` defined, creating a cluster becomes remarkably simple:
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: sample
name: my-cluster
namespace: default
spec:
# Network configuration defined at cluster level
clusterNetwork:
pods:
cidrBlocks: ["10.244.0.0/16"]
services:
cidrBlocks: ["10.96.0.0/12"]
serviceDomain: "cluster.local"
topology:
class: kamaji-clusterclass
classNamespace: capi-clusterclass
version: v1.31.0
class: vsphere-standard
classNamespace: capi-templates-vsphere
version: v1.32.0
controlPlane:
replicas: 2
workers:
machineDeployments:
- class: kamaji-clusterclass
name: md-sample
- class: default-worker
name: worker-nodes
replicas: 3
# other resources omitted for brevity ...
variables:
- name: kamajiControlPlane
value:
dataStoreName: "etcd"
network:
serviceType: "LoadBalancer"
serviceAddress: "" # Auto-assigned if empty
- name: machineSpecs
value:
numCPUs: 8
memoryMiB: 16384
diskGiB: 60
- name: infrastructure
value:
vmTemplate: "ubuntu-2404-kube-v1.32.0"
datacenter: "K8s-TI-dtc"
datastore: "K8s-N01td-01"
resourcePool: "rp-kamaji-dev"
folder: "my-cluster-vms"
- name: networking
value:
networkName: "VM-K8s-TI-cpmgmt"
nameservers: ["8.8.8.8", "1.1.1.1"]
dhcp4: false # Using IPAM
```
Always refer to the [Cluster API documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/) for the most up-to-date information on ClusterClass.
Create the cluster:
```bash
kubectl apply -f my-cluster.yaml
```
Monitor cluster creation:
```bash
clusterctl describe cluster my-cluster
kubectl get cluster,kamajicontrolplane,machinedeployment -n default
```
With this approach, the same `KamajiControlPlaneTemplate` and `KubeadmConfigTemplate` can be reused when creating `ClusterClasses` for AWS, Azure, or any other provider. Only the infrastructure-specific templates need to change.
## Cross-Provider Template Reuse
One of Kamaji's key advantages with `ClusterClass` is template modularity across providers. Here's how to leverage this:
### Shared Templates Repository
Create a namespace for shared templates:
```bash
kubectl create namespace cluster-templates
```
Deploy shared Kamaji and bootstrap templates once:
```bash
kubectl apply -n cluster-templates -f kamaji-controlplane-template.yaml
kubectl apply -n cluster-templates -f kubeadm-config-template.yaml
```
### Provider-Specific Cluster Classes
For each infrastructure provider, create a `ClusterClass` that references the shared templates:
#### AWS Cluster Class
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
name: kamaji-aws-class
spec:
controlPlane:
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
name: kamaji-controlplane
namespace: cluster-templates # Shared template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
name: aws-cluster-template # AWS-specific
workers:
machineDeployments:
- class: default-worker
template:
bootstrap:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: worker-bootstrap-template
namespace: cluster-templates # Shared template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: aws-worker-template # AWS-specific
```
### Azure Cluster Class
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
name: kamaji-azure-class
spec:
controlPlane:
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlaneTemplate
name: kamaji-controlplane
namespace: cluster-templates # Same shared template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureClusterTemplate
name: azure-cluster-template # Azure-specific
workers:
machineDeployments:
- class: default-worker
template:
bootstrap:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: worker-bootstrap-template
namespace: cluster-templates # Same shared template
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AzureMachineTemplate
name: azure-worker-template # Azure-specific
```
## Managing Cluster Class Lifecycle
### Listing Available Cluster Classes
```bash
kubectl get clusterclasses -A
```
### Viewing Cluster Class Details
```bash
kubectl describe clusterclass vsphere-standard -n capi-templates-vsphere
```
### Updating a Cluster Class
A `ClusterClass` update affects only new clusters. Existing clusters continue using their original configuration:
```bash
kubectl edit clusterclass vsphere-standard -n capi-templates-vsphere
```
### Deleting Clusters Created from Cluster Class
Always delete clusters before removing the `ClusterClass`:
```bash
# Delete the cluster
kubectl delete cluster my-cluster
# Wait for cleanup
kubectl wait --for=delete cluster/my-cluster --timeout=10m
# Then safe to delete ClusterClass if no longer needed
kubectl delete clusterclass vsphere-standard -n capi-templates-vsphere
```
## Template Versioning Strategies
When managing `ClusterClasses` across environments, consider these versioning approaches:
### Semantic Versioning in Names
```yaml
metadata:
name: vsphere-standard-v1-2-0
namespace: capi-templates-vsphere
```
### Using Labels for Version Tracking
```yaml
metadata:
name: vsphere-standard
namespace: capi-templates-vsphere
labels:
version: "1.2.0"
stability: "stable"
tier: "standard"
```
### Namespace Separation
```bash
kubectl create namespace clusterclass-v1
kubectl create namespace clusterclass-v2
```
This enables gradual migration between `ClusterClass` versions while maintaining compatibility.
## Further Reading
- [Cluster API ClusterClass Documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/)
- [Kamaji Control Plane Provider Reference](https://doc.crds.dev/github.com/clastix/cluster-api-control-plane-provider-kamaji)
- [CAPI Provider Integration](https://github.com/clastix/cluster-api-control-plane-provider-kamaji)

View File

@@ -1,14 +1,15 @@
# Konnectivity
In traditional Kubernetes deployments, the control plane components need to communicate directly with worker nodes for various operations
like executing commands in pods, retrieving logs, or managing port forwards.
In traditional Kubernetes deployments, the control plane components need to communicate directly with worker nodes for various operations like
executing commands in pods, retrieving logs, or managing port forwards.
However, in many real-world environments, especially those spanning multiple networks or cloud providers,
direct communication isn't always possible or desirable. This is where Konnectivity comes in.
## Understanding Konnectivity in Kamaji
Kamaji integrates [Konnectivity](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) as a core component of its architecture.
Each Tenant Control Plane pod includes a konnectivity-server running as a sidecar container,
Each Tenant Control Plane pod includes a `konnectivity-server` running as a sidecar container,
which establishes and maintains secure tunnels with agents running on the worker nodes.
This design ensures reliable communication even in complex network environments.
@@ -86,3 +87,68 @@ Available strategies are the following:
By integrating Konnectivity as a core feature, Kamaji ensures that your Tenant Clusters can operate reliably and securely across any network topology,
making it easier to build and manage distributed Kubernetes environments at scale.
## Version compatibility between API Server and Konnectivity
In recent Kubernetes releases, Konnectivity has aligned its versioning with the Kubernetes API Server.
This means that for example:
- Kubernetes v1.34.0 pairs with Konnectivity v0.34.0
- Kubernetes v1.33.0 pairs with Konnectivity v0.33.0
Within Kamaji, this version matching happens automatically.
The field `TenantControlPlane.spec.addons.konnectivity` determines the proper Konnectivity version for both the server and the agent,
ensuring compatibility with the tenant control plane's API Server version.
!!! warning "Konnectivity images might not be available!"
For the most recent Kubernetes releases, the corresponding Konnectivity image artifacts _may not yet be built and published_ by the upstream community.
In these cases, you may need to override the automatic pairing and configure a previous Konnectivity version that is available.
You can still have a version skew between the Kubernetes API Server of the given Tenant Control Plane and the Konnectivity components.
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: konnectivity
namespace: default
spec:
addons:
coreDNS: {}
konnectivity:
agent:
hostNetwork: false
image: registry.k8s.io/kas-network-proxy/proxy-agent
mode: DaemonSet
tolerations:
- key: CriticalAddonsOnly
operator: Exists
version: v0.33.0
server:
image: registry.k8s.io/kas-network-proxy/proxy-server
port: 8132
version: v0.33.0
kubeProxy: {}
controlPlane:
deployment:
replicas: 2
service:
serviceType: LoadBalancer
dataStore: etcd-kamaji-etcd
kubernetes:
kubelet:
cgroupfs: systemd
preferredAddressTypes:
- InternalIP
- ExternalIP
- Hostname
version: v1.34.0
networkProfile:
clusterDomain: cluster.local
dnsServiceIPs:
- 10.96.0.10
podCidr: 10.244.0.0/16
port: 6443
serviceCidr: 10.96.0.0/16
```

View File

@@ -41,7 +41,7 @@ k8s-133-scheduler-kubeconfig Opaque 1 3h45m
```
Once this operation is performed, Kamaji will trigger a certificate renewal,
reporting the rotation date time as the annotation `certs.kamaji.clastix.io/rotate` value.
reporting the rotation date time as the annotation `certs.kamaji.clastix.io/rotate` value in the [RFC3339](https://pkg.go.dev/time#RFC3339) format.
```
$: kubectl annotate secret -l kamaji.clastix.io/certificate_lifecycle_controller=x509 certs.kamaji.clastix.io/rotate=""
@@ -52,11 +52,11 @@ secret/k8s-133-front-proxy-client-certificate annotated
secret/k8s-133-konnectivity-certificate annotated
$: kubectl get secrets -l kamaji.clastix.io/certificate_lifecycle_controller=x509 -ojson | jq -r '.items[] | "\(.metadata.name) rotated at \(.metadata.annotations["certs.kamaji.clastix.io/rotate"])"'
k8s-133-api-server-certificate rotated at 2025-07-15 15:15:08.842191367 +0200 CEST m=+325.785000014
k8s-133-api-server-kubelet-client-certificate rotated at 2025-07-15 15:15:10.468139865 +0200 CEST m=+327.410948506
k8s-133-datastore-certificate rotated at 2025-07-15 15:15:15.454468752 +0200 CEST m=+332.397277417
k8s-133-front-proxy-client-certificate rotated at 2025-07-15 15:15:13.279920467 +0200 CEST m=+330.222729097
k8s-133-konnectivity-certificate rotated at 2025-07-15 15:15:17.361431671 +0200 CEST m=+334.304240277
k8s-133-api-server-certificate rotated at 2025-07-15T15:15:08+02:00
k8s-133-api-server-kubelet-client-certificate rotated at 2025-07-15T15:15:10+02:00
k8s-133-datastore-certificate rotated at 2025-07-15T15:15:15+02:00
k8s-133-front-proxy-client-certificate rotated at 2025-07-15T15:15:13+02:00
k8s-133-konnectivity-certificate rotated at 2025-07-15T15:15:17+02:00
```
You can notice the secrets have been automatically created back, as well as a TenantControlPlane rollout with the updated certificates.

File diff suppressed because it is too large Load Diff

View File

@@ -63,11 +63,11 @@ nav:
- 'Cluster API':
- cluster-api/index.md
- cluster-api/control-plane-provider.md
- cluster-api/cluster-class.md
- cluster-api/cluster-autoscaler.md
- cluster-api/vsphere-infra-provider.md
- cluster-api/proxmox-infra-provider.md
- cluster-api/other-providers.md
- cluster-api/cluster-autoscaler.md
- cluster-api/cluster-class.md
- 'Guides':
- guides/index.md
- guides/alternative-datastore.md

18
go.mod
View File

@@ -21,7 +21,7 @@ require (
github.com/prometheus/client_golang v1.23.2
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
github.com/spf13/viper v1.20.1
github.com/spf13/viper v1.21.0
github.com/testcontainers/testcontainers-go v0.38.0
go.etcd.io/etcd/api/v3 v3.6.4
go.etcd.io/etcd/client/v3 v3.6.4
@@ -34,9 +34,9 @@ require (
k8s.io/cluster-bootstrap v0.0.0
k8s.io/klog/v2 v2.130.1
k8s.io/kubelet v0.0.0
k8s.io/kubernetes v1.34.0
k8s.io/kubernetes v1.34.1
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
sigs.k8s.io/controller-runtime v0.22.0
sigs.k8s.io/controller-runtime v0.22.1
)
require (
@@ -82,7 +82,7 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-pg/zerochecker v0.2.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -120,19 +120,19 @@ require (
github.com/nats-io/nuid v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.5 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/testify v1.11.1 // indirect
github.com/subosito/gotenv v1.6.0 // indirect

36
go.sum
View File

@@ -110,8 +110,8 @@ github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -251,8 +251,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -275,8 +275,8 @@ github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlT
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v4 v4.25.5 h1:rtd9piuSMGeU8g1RMXjZs9y9luK5BwtnG7dZaQUJAsc=
@@ -285,19 +285,19 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -525,8 +525,8 @@ k8s.io/kube-proxy v0.34.0 h1:gU7MVbJHiXyPX8bXnod4bANtSC7rZSKkkLmM8gUqwT4=
k8s.io/kube-proxy v0.34.0/go.mod h1:tfwI8dCKm5Q0r+aVIbrq/aC36Kk936w2LZu8/rvJzWI=
k8s.io/kubelet v0.34.0 h1:1nZt1Q6Kfx7xCaTS9vnqR9sjZDxf3cRSQkAFCczULmc=
k8s.io/kubelet v0.34.0/go.mod h1:NqbF8ViVettlZbf9hw9DJhubaWn7rGvDDTcLMDm6tQ0=
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
k8s.io/kubernetes v1.34.1 h1:F3p8dtpv+i8zQoebZeK5zBqM1g9x1aIdnA5vthvcuUk=
k8s.io/kubernetes v1.34.1/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
k8s.io/system-validators v1.10.1 h1:bIO3YRgxJkh/W3ghcd5ViXNPGmjwQKlHk/ySPdw6K00=
k8s.io/system-validators v1.10.1/go.mod h1:awfSS706v9R12VC7u7K89FKfqVy44G+E0L1A0FX9Wmw=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
@@ -535,8 +535,8 @@ mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo=
mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.0 h1:mTOfibb8Hxwpx3xEkR56i7xSjB+nH4hZG37SrlCY5e0=
sigs.k8s.io/controller-runtime v0.22.0/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=

View File

@@ -136,7 +136,7 @@ func (d Deployment) setStrategy(deployment *appsv1.DeploymentSpec, tcp kamajiv1a
if tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate == nil {
maxSurge := intstr.FromString("100%")
maxUnavailable := intstr.FromInt(0)
maxUnavailable := intstr.FromInt32(0)
deployment.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
MaxUnavailable: &maxUnavailable,
@@ -344,7 +344,7 @@ func (d Deployment) buildScheduler(podSpec *corev1.PodSpec, tenantControlPlane k
args["--authorization-kubeconfig"] = kubeconfig
args["--bind-address"] = "0.0.0.0"
args["--kubeconfig"] = kubeconfig
args["--leader-elect"] = "true" //nolint:goconst
args["--leader-elect"] = "true"
podSpec.Containers[index].Name = schedulerContainerName
podSpec.Containers[index].Image = tenantControlPlane.Spec.ControlPlane.Deployment.RegistrySettings.KubeSchedulerImage(tenantControlPlane.Spec.Kubernetes.Version)
@@ -354,7 +354,7 @@ func (d Deployment) buildScheduler(podSpec *corev1.PodSpec, tenantControlPlane k
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(10259),
Port: intstr.FromInt32(10259),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -368,7 +368,7 @@ func (d Deployment) buildScheduler(podSpec *corev1.PodSpec, tenantControlPlane k
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(10259),
Port: intstr.FromInt32(10259),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -411,33 +411,32 @@ func (d Deployment) buildControllerManager(podSpec *corev1.PodSpec, tenantContro
index = len(podSpec.Containers)
podSpec.Containers = append(podSpec.Containers, corev1.Container{})
}
// Configuring the arguments of the container,
// taking in consideration the extra args from the user-space.
args := map[string]string{}
if tenantControlPlane.Spec.ControlPlane.Deployment.ExtraArgs != nil {
args = utilities.ArgsFromSliceToMap(tenantControlPlane.Spec.ControlPlane.Deployment.ExtraArgs.ControllerManager)
}
kubeconfig := "/etc/kubernetes/controller-manager.conf"
args["--allocate-node-cidrs"] = "true"
args["--authentication-kubeconfig"] = kubeconfig
args["--authorization-kubeconfig"] = kubeconfig
args["--bind-address"] = "0.0.0.0"
args["--client-ca-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName)
args["--cluster-name"] = tenantControlPlane.GetName()
args["--cluster-signing-cert-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName)
args["--cluster-signing-key-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.CAKeyName)
args["--controllers"] = "*,bootstrapsigner,tokencleaner"
args["--kubeconfig"] = kubeconfig
args["--leader-elect"] = "true"
args["--service-cluster-ip-range"] = tenantControlPlane.Spec.NetworkProfile.ServiceCIDR
args["--cluster-cidr"] = tenantControlPlane.Spec.NetworkProfile.PodCIDR
args["--requestheader-client-ca-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyCACertName)
args["--root-ca-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName)
args["--service-account-private-key-file"] = path.Join(v1beta3.DefaultCertificatesDir, constants.ServiceAccountPrivateKeyName)
args["--use-service-account-credentials"] = "true"
args := map[string]string{
"--allocate-node-cidrs": "true",
"--authentication-kubeconfig": kubeconfig,
"--authorization-kubeconfig": kubeconfig,
"--bind-address": "0.0.0.0",
"--client-ca-file": path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName),
"--cluster-name": tenantControlPlane.GetName(),
"--cluster-signing-cert-file": path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName),
"--cluster-signing-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.CAKeyName),
"--controllers": "*,bootstrapsigner,tokencleaner",
"--kubeconfig": kubeconfig,
"--leader-elect": "true",
"--service-cluster-ip-range": tenantControlPlane.Spec.NetworkProfile.ServiceCIDR,
"--cluster-cidr": tenantControlPlane.Spec.NetworkProfile.PodCIDR,
"--requestheader-client-ca-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyCACertName),
"--root-ca-file": path.Join(v1beta3.DefaultCertificatesDir, constants.CACertName),
"--service-account-private-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.ServiceAccountPrivateKeyName),
"--use-service-account-credentials": "true",
}
if extraArgs := tenantControlPlane.Spec.ControlPlane.Deployment.ExtraArgs; extraArgs != nil && len(extraArgs.ControllerManager) > 0 {
args = utilities.MergeMaps(args, utilities.ArgsFromSliceToMap(extraArgs.ControllerManager))
}
podSpec.Containers[index].Name = "kube-controller-manager"
podSpec.Containers[index].Image = tenantControlPlane.Spec.ControlPlane.Deployment.RegistrySettings.KubeControllerManagerImage(tenantControlPlane.Spec.Kubernetes.Version)
@@ -447,7 +446,7 @@ func (d Deployment) buildControllerManager(podSpec *corev1.PodSpec, tenantContro
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(10257),
Port: intstr.FromInt32(10257),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -461,7 +460,7 @@ func (d Deployment) buildControllerManager(podSpec *corev1.PodSpec, tenantContro
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(10257),
Port: intstr.FromInt32(10257),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -564,7 +563,7 @@ func (d Deployment) buildKubeAPIServer(podSpec *corev1.PodSpec, tenantControlPla
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/livez",
Port: intstr.FromInt(int(tenantControlPlane.Spec.NetworkProfile.Port)),
Port: intstr.FromInt32(tenantControlPlane.Spec.NetworkProfile.Port),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -578,7 +577,7 @@ func (d Deployment) buildKubeAPIServer(podSpec *corev1.PodSpec, tenantControlPla
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/readyz",
Port: intstr.FromInt(int(tenantControlPlane.Spec.NetworkProfile.Port)),
Port: intstr.FromInt32(tenantControlPlane.Spec.NetworkProfile.Port),
Scheme: corev1.URISchemeHTTPS,
},
},
@@ -592,7 +591,7 @@ func (d Deployment) buildKubeAPIServer(podSpec *corev1.PodSpec, tenantControlPla
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/livez",
Port: intstr.FromInt(int(tenantControlPlane.Spec.NetworkProfile.Port)),
Port: intstr.FromInt32(tenantControlPlane.Spec.NetworkProfile.Port),
Scheme: corev1.URISchemeHTTPS,
},
},

View File

@@ -6,6 +6,7 @@ package controlplane
import (
"fmt"
"github.com/blang/semver"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -33,7 +34,20 @@ type Konnectivity struct {
Scheme runtime.Scheme
}
func (k Konnectivity) buildKonnectivityContainer(addon *kamajiv1alpha1.KonnectivitySpec, replicas int32, podSpec *corev1.PodSpec) {
// serverVersion resolves the Konnectivity server version to deploy.
// An explicitly configured addon version always wins; otherwise the
// version is derived from the Tenant Control Plane Kubernetes version
// (v1.X.* pairs with v0.X.0). An empty string is returned when the
// Kubernetes version cannot be parsed.
func (k Konnectivity) serverVersion(tcpVersion, addonVersion string) string {
	if len(addonVersion) > 0 {
		return addonVersion
	}

	parsed, err := semver.ParseTolerant(tcpVersion)
	if err != nil {
		return ""
	}

	return fmt.Sprintf("v0.%d.0", parsed.Minor)
}
func (k Konnectivity) buildKonnectivityContainer(tcpVersion string, addon *kamajiv1alpha1.KonnectivitySpec, replicas int32, podSpec *corev1.PodSpec) {
found, index := utilities.HasNamedContainer(podSpec.Containers, konnectivityServerName)
if !found {
index = len(podSpec.Containers)
@@ -41,7 +55,7 @@ func (k Konnectivity) buildKonnectivityContainer(addon *kamajiv1alpha1.Konnectiv
}
podSpec.Containers[index].Name = konnectivityServerName
podSpec.Containers[index].Image = fmt.Sprintf("%s:%s", addon.KonnectivityServerSpec.Image, addon.KonnectivityServerSpec.Version)
podSpec.Containers[index].Image = fmt.Sprintf("%s:%s", addon.KonnectivityServerSpec.Image, k.serverVersion(tcpVersion, addon.KonnectivityServerSpec.Version))
podSpec.Containers[index].Command = []string{"/proxy-server"}
args := utilities.ArgsFromSliceToMap(addon.KonnectivityServerSpec.ExtraArgs)
@@ -70,7 +84,7 @@ func (k Konnectivity) buildKonnectivityContainer(addon *kamajiv1alpha1.Konnectiv
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8134),
Port: intstr.FromInt32(8134),
Scheme: corev1.URISchemeHTTP,
},
},
@@ -254,7 +268,7 @@ func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, pod
}
func (k Konnectivity) Build(deployment *appsv1.Deployment, tenantControlPlane kamajiv1alpha1.TenantControlPlane) {
k.buildKonnectivityContainer(tenantControlPlane.Spec.Addons.Konnectivity, *tenantControlPlane.Spec.ControlPlane.Deployment.Replicas, &deployment.Spec.Template.Spec)
k.buildKonnectivityContainer(tenantControlPlane.Spec.Kubernetes.Version, tenantControlPlane.Spec.Addons.Konnectivity, *tenantControlPlane.Spec.ControlPlane.Deployment.Replicas, &deployment.Spec.Template.Spec)
k.buildVolumeMounts(&deployment.Spec.Template.Spec)
k.buildVolumes(tenantControlPlane.Status.Addons.Konnectivity, &deployment.Spec.Template.Spec)

View File

@@ -309,7 +309,7 @@ func (c *CoreDNS) mutateDeployment(ctx context.Context, tenantClient client.Clie
if err := controllerutil.SetControllerReference(c.clusterRoleBinding, c.deployment, tenantClient.Scheme()); err != nil {
return controllerutil.OperationResultNone, err
}
//nolint:staticcheck
return controllerutil.OperationResultNone, tenantClient.Patch(ctx, c.deployment, client.Apply, client.FieldOwner("kamaji"), client.ForceOwnership)
}
@@ -345,7 +345,7 @@ func (c *CoreDNS) mutateService(ctx context.Context, tenantClient client.Client)
if err := controllerutil.SetControllerReference(c.clusterRoleBinding, c.service, tenantClient.Scheme()); err != nil {
return controllerutil.OperationResultNone, err
}
//nolint:staticcheck
return controllerutil.OperationResultNone, tenantClient.Patch(ctx, c.service, client.Apply, client.FieldOwner("kamaji"), client.ForceOwnership)
}

View File

@@ -314,7 +314,7 @@ func (k *KubeProxy) mutateDaemonSet(ctx context.Context, tenantClient client.Cli
if err := controllerutil.SetControllerReference(k.clusterRoleBinding, k.daemonSet, tenantClient.Scheme()); err != nil {
return controllerutil.OperationResultNone, err
}
//nolint:staticcheck
return controllerutil.OperationResultNone, tenantClient.Patch(ctx, k.daemonSet, client.Apply, client.FieldOwner("kamaji"), client.ForceOwnership)
}

View File

@@ -100,7 +100,7 @@ func (r *KubernetesServiceResource) mutate(ctx context.Context, tenantControlPla
r.resource.Spec.Ports[0].Name = "kube-apiserver"
r.resource.Spec.Ports[0].Protocol = corev1.ProtocolTCP
r.resource.Spec.Ports[0].Port = tenantControlPlane.Spec.NetworkProfile.Port
r.resource.Spec.Ports[0].TargetPort = intstr.FromInt(int(tenantControlPlane.Spec.NetworkProfile.Port))
r.resource.Spec.Ports[0].TargetPort = intstr.FromInt32(tenantControlPlane.Spec.NetworkProfile.Port)
switch tenantControlPlane.Spec.ControlPlane.Service.ServiceType {
case kamajiv1alpha1.ServiceTypeLoadBalancer:

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"github.com/blang/semver"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -36,6 +37,19 @@ func (r *Agent) GetHistogram() prometheus.Histogram {
return agentCollector
}
// agentVersion resolves the konnectivity-agent version: the version set
// in the addon spec takes precedence; otherwise it is inferred from the
// Tenant Control Plane Kubernetes version (v1.X.* pairs with v0.X.0).
// An empty string is returned when the Kubernetes version cannot be parsed.
func (r *Agent) agentVersion(tcp *kamajiv1alpha1.TenantControlPlane) string {
	if v := tcp.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Version; v != "" {
		return v
	}

	parsed, err := semver.ParseTolerant(tcp.Spec.Kubernetes.Version)
	if err != nil {
		return ""
	}

	return fmt.Sprintf("v0.%d.0", parsed.Minor)
}
func (r *Agent) ShouldStatusBeUpdated(_ context.Context, tcp *kamajiv1alpha1.TenantControlPlane) bool {
return tcp.Spec.Addons.Konnectivity == nil && (tcp.Status.Addons.Konnectivity.Agent.Namespace != "" || tcp.Status.Addons.Konnectivity.Agent.Name != "") ||
tcp.Spec.Addons.Konnectivity != nil && (tcp.Status.Addons.Konnectivity.Agent.Namespace != r.resource.GetNamespace() || tcp.Status.Addons.Konnectivity.Agent.Name != r.resource.GetName()) ||
@@ -219,7 +233,7 @@ func (r *Agent) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.T
podTemplateSpec.Spec.Containers = make([]corev1.Container, 1)
}
podTemplateSpec.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Image, tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Version)
podTemplateSpec.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Image, r.agentVersion(tenantControlPlane))
podTemplateSpec.Spec.Containers[0].Name = AgentName
podTemplateSpec.Spec.Containers[0].Command = []string{"/proxy-agent"}

View File

@@ -4,6 +4,8 @@
package utilities
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -34,7 +36,7 @@ func SetLastRotationTimestamp(obj client.Object) {
annotations = map[string]string{}
}
annotations[RotateCertificateRequestAnnotation] = metav1.Now().String()
annotations[RotateCertificateRequestAnnotation] = metav1.Now().Format(time.RFC3339)
obj.SetAnnotations(annotations)
}