Mirror of https://github.com/clastix/kamaji.git (synced 2026-02-22 05:49:52 +00:00)
Compare commits
24 Commits
| SHA1 |
|---|
| 8e8ee92fb2 |
| f3be9e5442 |
| fb296267f6 |
| 751ce3722b |
| d99ffb0334 |
| f831f385c4 |
| f301c9bdc2 |
| 0909529e6b |
| d0aacd03f6 |
| f4c84946c0 |
| 2c72369b99 |
| abcc662c96 |
| 792119d2d3 |
| f0e675dea3 |
| 4413061640 |
| 8f57ff407e |
| 94f2d9074d |
| fadcc219ec |
| af5ac4acab |
| 069afd9b17 |
| 6741194034 |
| 7acba20056 |
| 0d2cf784f5 |
| 4db8230912 |
.github/workflows/docker-ci.yml (vendored): 10 changed lines
@@ -12,6 +12,8 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: Generate build-args
id: build-args
@@ -85,7 +87,13 @@ jobs:
platforms: linux/amd64,linux/arm64,linux/arm
push: true
tags: ${{ steps.meta.outputs.tags }}
build-args:
build-args: |
GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
GIT_MODIFIED=${{ env.GIT_MODIFIED }}
GIT_REPO=${{ env.GIT_REPO }}
BUILD_DATE=${{ env.BUILD_DATE }}

- name: Image digest
run: echo ${{ steps.build-release.outputs.digest }}
Makefile: 2 changed lines
@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.3.0
VERSION ?= 0.3.2

# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")

@@ -35,7 +35,7 @@
- [x] Seamless migration between datastores
- [ ] Automatic assignment to a datastore
- [ ] Autoscaling of Tenant Control Plane
- [ ] Provisioning through Cluster APIs
- [x] Provisioning through Cluster APIs
- [ ] Terraform provider
- [ ] Custom Prometheus metrics for monitoring and alerting
assets/logo-colored.png: new binary file (27 KiB, not shown)
@@ -1,10 +1,8 @@
apiVersion: v2
appVersion: v0.3.0
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
appVersion: v0.3.2
description: Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster” to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
kubeVersion: ">=1.21.0-0"
maintainers:
- email: dario@tranchitella.eu
@@ -17,8 +15,8 @@ name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.12.0
version: 0.12.3
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service
catalog.cattle.io/display-name: Kamaji
@@ -1,8 +1,8 @@
# kamaji

(badge images not shown)

Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster” to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.

## Maintainers
@@ -1,30 +1,12 @@
# Kamaji - Managed Kubernetes Service
# Kamaji

Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden.

Useful links:
- [Kamaji Github repository](https://github.com/clastix/kamaji)
- [Kamaji Documentation](https://github.com/clastix/kamaji/docs/)
- [Kamaji Documentation](https://kamaji.clastix.io)

## Requirements

* Kubernetes v1.22+
* Helm v3

# Installation

To install the Chart with the release name `kamaji`:

helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji

Show the status:

helm status kamaji -n kamaji-system

Upgrade the Chart

helm upgrade kamaji -n kamaji-system clastix/kamaji

Uninstall the Chart

helm uninstall kamaji -n kamaji-system
* Helm v3
@@ -9,6 +9,7 @@ import (
"io"
"os"
goRuntime "runtime"
"time"

"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -33,18 +34,19 @@ import (
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
// CLI flags
var (
metricsBindAddress string
healthProbeBindAddress string
leaderElect bool
tmpDirectory string
kineImage string
datastore string
managerNamespace string
managerServiceAccountName string
managerServiceName string
webhookCABundle []byte
migrateJobImage string
maxConcurrentReconciles int
metricsBindAddress string
healthProbeBindAddress string
leaderElect bool
tmpDirectory string
kineImage string
controllerReconcileTimeout time.Duration
datastore string
managerNamespace string
managerServiceAccountName string
managerServiceName string
webhookCABundle []byte
migrateJobImage string
maxConcurrentReconciles int

webhookCAPath string
)
@@ -73,6 +75,10 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
return err
}

if controllerReconcileTimeout.Seconds() == 0 {
return fmt.Errorf("the controller reconcile timeout must be greater than zero")
}

return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
@@ -111,6 +117,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
Client: mgr.GetClient(),
APIReader: mgr.GetAPIReader(),
Config: controllers.TenantControlPlaneReconcilerConfig{
ReconcileTimeout: controllerReconcileTimeout,
DefaultDataStoreName: datastore,
KineContainerImage: kineImage,
TmpBaseDirectory: tmpDirectory,
@@ -149,6 +156,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
handlers.TenantControlPlaneDefaults{DefaultDatastore: datastore},
},
routes.TenantControlPlaneValidate{}: {
handlers.TenantControlPlaneName{},
handlers.TenantControlPlaneVersion{},
handlers.TenantControlPlaneKubeletAddresses{},
handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
@@ -230,6 +238,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
cmd.Flags().StringVar(&managerServiceName, "webhook-service-name", "kamaji-webhook-service", "The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs.")
cmd.Flags().StringVar(&managerServiceAccountName, "serviceaccount-name", os.Getenv("SERVICE_ACCOUNT"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
cmd.Flags().StringVar(&webhookCAPath, "webhook-ca-path", "/tmp/k8s-webhook-server/serving-certs/ca.crt", "Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs.")
cmd.Flags().DurationVar(&controllerReconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "The reconciliation request timeout before the controller withdraw the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.")

cobra.OnInitialize(func() {
viper.AutomaticEnv()
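The new `--controller-reconcile-timeout` flag and its non-zero guard follow the usual cobra pattern: bind a `time.Duration` flag with `DurationVar`, then reject a zero value before the command body runs. A minimal stand-alone sketch of that pattern (command name and wiring are illustrative, not Kamaji's actual manager command):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	var reconcileTimeout time.Duration

	cmd := &cobra.Command{
		Use: "manager",
		// Reject a zero timeout before running, mirroring the check added above.
		PreRunE: func(*cobra.Command, []string) error {
			if reconcileTimeout.Seconds() == 0 {
				return fmt.Errorf("the controller reconcile timeout must be greater than zero")
			}

			return nil
		},
		RunE: func(*cobra.Command, []string) error {
			fmt.Println("reconcile timeout:", reconcileTimeout)

			return nil
		},
	}

	// DurationVar parses values such as 30s or 1m from the command line.
	cmd.Flags().DurationVar(&reconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "Timeout applied to each reconciliation request.")

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```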
@@ -12,6 +12,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
appsv1 "k8s.io/kubernetes/pkg/apis/apps/v1"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
@@ -26,6 +27,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {

utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
utilruntime.Must(appsv1.RegisterDefaults(scheme))
},
}
}
@@ -5062,7 +5062,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
image: clastix/kamaji:v0.3.0
image: clastix/kamaji:v0.3.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: clastix/kamaji
newTag: v0.3.0
newTag: v0.3.2
@@ -56,6 +56,7 @@ type TenantControlPlaneReconciler struct {

// TenantControlPlaneReconcilerConfig gives the necessary configuration for TenantControlPlaneReconciler.
type TenantControlPlaneReconcilerConfig struct {
ReconcileTimeout time.Duration
DefaultDataStoreName string
KineContainerImage string
TmpBaseDirectory string
@@ -74,6 +75,10 @@ type TenantControlPlaneReconcilerConfig struct {
func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)

var cancelFn context.CancelFunc
ctx, cancelFn = context.WithTimeout(ctx, r.Config.ReconcileTimeout)
defer cancelFn()

tenantControlPlane, err := r.getTenantControlPlane(ctx, req.NamespacedName)()
if err != nil {
if apimachineryerrors.IsNotFound(err) {
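Inside the controller, the configured timeout becomes a deadline on the reconciliation context, so calls against the Datastore or the Tenant Control Plane endpoint cannot hang past it. A rough sketch of the same pattern, using a hypothetical controller-runtime reconciler:

```go
package controllers

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// timeoutReconciler is a hypothetical reconciler shown only to illustrate
// the bounded-context pattern behind ReconcileTimeout.
type timeoutReconciler struct {
	ReconcileTimeout time.Duration // fed by --controller-reconcile-timeout
}

func (r *timeoutReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	// Every external call performed below inherits this deadline.
	ctx, cancel := context.WithTimeout(ctx, r.ReconcileTimeout)
	defer cancel()

	// ... fetch the TenantControlPlane, talk to the datastore, etc., using ctx.
	if err := ctx.Err(); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
```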
@@ -20,9 +20,13 @@ Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/ex
All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).

## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the APIs server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how to add another tenant cluster to Kamaji? As you could expect, you have just deploys a new Tenant Control Plane in one of the _“admin cluster”_ namespace, and then joins the tenant worker nodes to it.

We have in roadmap, the Cluster APIs support as well as a Terraform provider so that you can create _“tenant clusters”_ in a declarative way.
And what about the tenant worker nodes?
They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the APIs server of the Tenant Control Plane.
Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how to add another tenant cluster to Kamaji?
As you could expect, you have just deploys a new Tenant Control Plane in one of the _“admin cluster”_ namespace, and then joins the tenant worker nodes to it.

A [Cluster API ControlPlane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been released, allowing to offer a Cluster API-native declarative lifecycle, by automating the worker nodes join.

## Datastores
Putting the Tenant Control Plane in a pod is the easiest part. Also, we have to make sure each tenant cluster saves the state to be able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the admin cluster, you can deploy one or multi-tenant `etcd` to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing multiple datastores. By sharing the datastore between multiple tenants, the resiliency is still guaranteed and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that you have to operate external datastores, in addition to `etcd` of the _“admin cluster”_ and manage the access to be sure that each _“tenant cluster”_ uses only its data.
@@ -73,7 +73,7 @@ helm install \

## Install Kamaji Controller

Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unamanaged `etcd` as datastore, out of box.
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` as datastore, out of box.

Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:

@@ -240,10 +240,11 @@ And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.

### Prepare worker nodes to join

Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.
Currently, Kamaji does not provide any helper for creation of tenant worker nodes.
You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.

!!! note "Cluster APIs support"
In the future, we'll provide creation of tenant clusters through Cluster APIs.
Kamaji is sticking to the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api) project contracts by providing a `ControlPlane` provider.
Please, refer to the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) to learn more about it.

You can use the provided helper script `/deploy/nodes-prerequisites.sh`, in order to install the dependencies on all the worker nodes:

@@ -114,7 +114,7 @@ helm install \

## Install Kamaji Controller

Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unamanaged `etcd` as datastore, out of box.
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` as datastore, out of box.

Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:

@@ -274,10 +274,11 @@ kubernetes 10.240.0.100:6443 57m

### Prepare worker nodes to join

Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.
Currently, Kamaji does not provide any helper for creation of tenant worker nodes.
You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.

!!! note "Cluster APIs support"
In the future, we'll provide creation of tenant clusters through Cluster APIs.
Kamaji is sticking to the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api) project contracts by providing a `ControlPlane` provider.
An Azure-based cluster is not yet available: the available road-map is available on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).

Create an Azure VM Stateful Set to host worker nodes

@@ -27,6 +27,9 @@ spec:
```

## Upgrade of Tenant Worker Nodes
As currently Kamaji is not providing any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`. Refer to the official [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#upgrade-worker-nodes).

> We have in roadmap, the Cluster APIs support so that you can upgrade _“tenant clusters”_ in a fully declarative way.
As currently Kamaji is not providing any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`.
Refer to the official [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#upgrade-worker-nodes).

Kamaji is offering a [Cluster API Control Plane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji), thus integrating with the Kubernetes clusters declarative management approach.
You can refer to the official [Cluster API documentation](https://cluster-api.sigs.k8s.io/).
@@ -2,9 +2,11 @@

In Kamaji, there are different components that might require independent versioning and support level:

|Kamaji |Admin Cluster| Tenant Cluster |
|-------|-------------|----------------------|
| v0.0 | v1.22+ | [v1.21.0 .. v1.23.5] |
| v0.1 | v1.22+ | [v1.21.0 .. v1.25.0] |
| v0.2 | v1.22+ | [v1.21.0 .. v1.27.0] |

| Kamaji | Admin Cluster | Tenant Cluster |
|--------|---------------|----------------------|
| v0.0 | v1.22+ | [v1.21.0 .. v1.23.5] |
| v0.1 | v1.22+ | [v1.21.0 .. v1.25.0] |
| v0.2 | v1.22+ | [v1.21.0 .. v1.27.0] |
| v0.3.0 | v1.22+ | [v1.21.0 .. v1.27.0] |
| v0.3.1 | v1.22+ | [v1.21.0 .. v1.27.3] |
| v0.3.2 | v1.22+ | [v1.21.0 .. v1.27.3] |
@@ -35,7 +35,6 @@ func CreateKubeadmInitConfiguration(params Parameters) (*Configuration, error) {
AdvertiseAddress: params.TenantControlPlaneAddress,
BindPort: params.TenantControlPlanePort,
}
conf.NodeRegistration.Name = params.TenantControlPlaneName

caFile, certFile, keyFile := "", "", ""
if strings.HasPrefix(params.ETCDs[0], "https") {
@@ -4,6 +4,9 @@
package kubeadm

import (
"fmt"

"github.com/blang/semver"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -19,6 +22,14 @@ import (
"github.com/clastix/kamaji/internal/utilities"
)

const (
// kubeletConfigMapName defines base kubelet configuration ConfigMap name for kubeadm < 1.24.
kubeletConfigMapName = "kubelet-config-%d.%d"
)

// minVerUnversionedKubeletConfig defines minimum version from which kubeadm uses kubelet-config as a ConfigMap name.
var minVerUnversionedKubeletConfig = semver.MustParse("1.24.0")

func UploadKubeadmConfig(client kubernetes.Interface, config *Configuration) ([]byte, error) {
return nil, uploadconfig.UploadConfiguration(&config.InitConfiguration, client)
}
@@ -34,7 +45,10 @@ func UploadKubeletConfig(client kubernetes.Interface, config *Configuration) ([]
return nil, err
}

configMapName := kubeadmconstants.KubeletBaseConfigurationConfigMap
configMapName, err := generateKubeletConfigMapName(config.Parameters.TenantControlPlaneVersion)
if err != nil {
return nil, err
}

configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -50,7 +64,7 @@ func UploadKubeletConfig(client kubernetes.Interface, config *Configuration) ([]
return nil, err
}

if err = createConfigMapRBACRules(client); err != nil {
if err = createConfigMapRBACRules(client, configMapName); err != nil {
return nil, errors.Wrap(err, "error creating kubelet configuration configmap RBAC rules")
}

@@ -114,7 +128,7 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
return utilities.EncodeToYaml(&kc)
}

func createConfigMapRBACRules(client kubernetes.Interface) error {
func createConfigMapRBACRules(client kubernetes.Interface, configMapName string) error {
configMapRBACName := kubeadmconstants.KubeletBaseConfigMapRole

if err := apiclient.CreateOrUpdateRole(client, &rbacv1.Role{
@@ -127,7 +141,7 @@ func createConfigMapRBACRules(client kubernetes.Interface) error {
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"configmaps"},
ResourceNames: []string{kubeadmconstants.KubeletBaseConfigurationConfigMap},
ResourceNames: []string{configMapName},
},
},
}); err != nil {
@@ -156,3 +170,17 @@ func createConfigMapRBACRules(client kubernetes.Interface) error {
},
})
}

func generateKubeletConfigMapName(version string) (string, error) {
parsedVersion, err := semver.ParseTolerant(version)
if err != nil {
return "", errors.Wrapf(err, "failed to parse kubernetes version %q", version)
}

majorMinor := semver.Version{Major: parsedVersion.Major, Minor: parsedVersion.Minor}
if majorMinor.GTE(minVerUnversionedKubeletConfig) {
return kubeadmconstants.KubeletBaseConfigurationConfigMap, nil
}

return fmt.Sprintf(kubeletConfigMapName, parsedVersion.Major, parsedVersion.Minor), nil
}
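The `generateKubeletConfigMapName` helper added here gates the ConfigMap name on the tenant version: kubeadm 1.24 and later use the unversioned `kubelet-config` name, while older releases keep `kubelet-config-<major>.<minor>`. A stand-alone sketch of the same gate, reimplemented for illustration with the kubeadm constant inlined as a plain string:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// kubeadm >= 1.24 drops the version suffix from the kubelet ConfigMap name.
var minVerUnversionedKubeletConfig = semver.MustParse("1.24.0")

func kubeletConfigMapNameFor(version string) (string, error) {
	parsed, err := semver.ParseTolerant(version) // tolerates a leading "v"
	if err != nil {
		return "", fmt.Errorf("failed to parse kubernetes version %q: %w", version, err)
	}

	majorMinor := semver.Version{Major: parsed.Major, Minor: parsed.Minor}
	if majorMinor.GTE(minVerUnversionedKubeletConfig) {
		return "kubelet-config", nil
	}

	return fmt.Sprintf("kubelet-config-%d.%d", parsed.Major, parsed.Minor), nil
}

func main() {
	for _, v := range []string{"v1.23.5", "v1.24.0", "v1.27.3"} {
		name, _ := kubeletConfigMapNameFor(v)
		fmt.Println(v, "->", name) // v1.23.5 -> kubelet-config-1.23, the others -> kubelet-config
	}
}
```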
@@ -4,6 +4,7 @@
package resources

import (
"bytes"
"context"
"fmt"

@@ -96,6 +97,13 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
if err != nil {
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.CACertAndKeyBaseName, err.Error()))
}
// Appending the Cluster API required keys if they're missing:
// with this we're sure to avoid introducing breaking changes.
if isValid && (!bytes.Equal(r.resource.Data[corev1.TLSCertKey], r.resource.Data[kubeadmconstants.CACertName]) || !bytes.Equal(r.resource.Data[kubeadmconstants.CAKeyName], r.resource.Data[corev1.TLSPrivateKeyKey])) {
r.resource.Data[corev1.TLSCertKey] = r.resource.Data[kubeadmconstants.CACertName]
r.resource.Data[corev1.TLSPrivateKeyKey] = r.resource.Data[kubeadmconstants.CAKeyName]
}

if isValid {
return nil
}
@@ -122,6 +130,11 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
r.resource.Data = map[string][]byte{
kubeadmconstants.CACertName: ca.Certificate,
kubeadmconstants.CAKeyName: ca.PrivateKey,
// Required for Cluster API integration which is reading the basic TLS keys.
// We cannot switch over basic corev1.Secret keys for backward compatibility,
// it would require a new CA generation breaking all the clusters deployed.
corev1.TLSCertKey: ca.Certificate,
corev1.TLSPrivateKeyKey: ca.PrivateKey,
}

r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
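After this change the CA Secret carries the same material under two naming schemes: the kubeadm keys (`ca.crt`/`ca.key`) Kamaji already used, plus the standard TLS keys (`tls.crt`/`tls.key`) that the Cluster API provider reads. A hedged sketch of a Secret built that way (helper name and values are illustrative, not Kamaji code):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildCASecret duplicates the CA material under both naming conventions,
// mirroring the compatibility shim described in the hunk above.
func buildCASecret(namespace, name string, caCert, caKey []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Data: map[string][]byte{
			"ca.crt":                caCert, // kubeadm-style key, kept for backward compatibility
			"ca.key":                caKey,
			corev1.TLSCertKey:       caCert, // "tls.crt", read by the Cluster API provider
			corev1.TLSPrivateKeyKey: caKey,  // "tls.key"
		},
	}
}

func main() {
	secret := buildCASecret("tenant-system", "tenant-ca", []byte("cert-pem"), []byte("key-pem"))
	fmt.Println(len(secret.Data)) // 4: ca.crt, ca.key, tls.crt, tls.key
}
```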
@@ -6,6 +6,7 @@ package datastore

import (
"context"
"fmt"
"strings"

"github.com/google/uuid"
corev1 "k8s.io/api/core/v1"
@@ -94,8 +95,9 @@ func (r *Config) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.Te
if len(fromStatus) > 0 {
return []byte(fromStatus)
}

return []byte(fmt.Sprintf("%s_%s", tenantControlPlane.GetNamespace(), tenantControlPlane.GetName()))
// The dash character (-) must be replaced with an underscore, PostgreSQL is complaining about it:
// https://github.com/clastix/kamaji/issues/328
return []byte(strings.ReplaceAll(fmt.Sprintf("%s_%s", tenantControlPlane.GetNamespace(), tenantControlPlane.GetName()), "-", "_"))
}

r.resource.Data = map[string][]byte{
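The datastore name is derived as `<namespace>_<name>`, and the fix above replaces dashes with underscores because PostgreSQL complains about them in identifiers (issue #328 linked in the hunk). A trivial stand-alone illustration of the before/after:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	namespace, name := "tenant-system", "my-tcp"

	// Without the replacement the derived name would be "tenant-system_my-tcp",
	// which PostgreSQL rejects; every dash becomes an underscore instead.
	sanitized := strings.ReplaceAll(fmt.Sprintf("%s_%s", namespace, name), "-", "_")

	fmt.Println(sanitized) // tenant_system_my_tcp
}
```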
@@ -142,6 +142,9 @@ func (r *ServiceResource) mutate(_ context.Context, tenantControlPlane *kamajiv1
r.resource.Spec.Ports[1].Protocol = corev1.ProtocolTCP
r.resource.Spec.Ports[1].Port = tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Port
r.resource.Spec.Ports[1].TargetPort = intstr.FromInt(int(tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Port))
if tenantControlPlane.Spec.ControlPlane.Service.ServiceType == kamajiv1alpha1.ServiceTypeNodePort {
r.resource.Spec.Ports[1].NodePort = tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Port
}

return controllerutil.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
@@ -117,10 +117,10 @@ func (r *KubeconfigResource) CreateOrUpdate(ctx context.Context, tenantControlPl
return utilities.CreateOrUpdateWithConflict(ctx, r.Client, r.resource, r.mutate(ctx, tenantControlPlane))
}

func (r *KubeconfigResource) checksum(apiServerCertificatesSecret *corev1.Secret, kubeadmChecksum string) string {
func (r *KubeconfigResource) checksum(caCertificatesSecret *corev1.Secret, kubeadmChecksum string) string {
return utilities.CalculateMapChecksum(map[string][]byte{
"ca-cert-checksum": apiServerCertificatesSecret.Data[kubeadmconstants.CACertName],
"ca-key-checksum": apiServerCertificatesSecret.Data[kubeadmconstants.CAKeyName],
"ca-cert-checksum": caCertificatesSecret.Data[kubeadmconstants.CACertName],
"ca-key-checksum": caCertificatesSecret.Data[kubeadmconstants.CAKeyName],
"kubeadmconfig": []byte(kubeadmChecksum),
})
}
@@ -142,15 +142,15 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
return err
}

apiServerCertificatesSecretNamespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
apiServerCertificatesSecret := &corev1.Secret{}
if err := r.Client.Get(ctx, apiServerCertificatesSecretNamespacedName, apiServerCertificatesSecret); err != nil {
caSecretNamespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
caCertificatesSecret := &corev1.Secret{}
if err = r.Client.Get(ctx, caSecretNamespacedName, caCertificatesSecret); err != nil {
logger.Error(err, "cannot retrieve the CA")

return err
}

checksum := r.checksum(apiServerCertificatesSecret, config.Checksum())
checksum := r.checksum(caCertificatesSecret, config.Checksum())

status, err := r.getKubeconfigStatus(tenantControlPlane)
if err != nil {
@@ -158,26 +158,28 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam

return err
}
// A new kubeconfig must be generated when one of the following cases is occurring:
// 1. the status checksum is different from the computed one
// 2. the resource UID is empty, meaning it's a new resource (tl;dr; a first reconciliation)
//
// And finally, we're checking if the kubeconfig is valid: if not, generating a new one.
if (status.Checksum != checksum || len(r.resource.UID) == 0) && !kubeadm.IsKubeconfigValid(r.resource.Data[r.KubeConfigFileName]) {
kubeconfig, err := kubeadm.CreateKubeconfig(
r.KubeConfigFileName,

kubeadm.CertificatePrivateKeyPair{
Certificate: apiServerCertificatesSecret.Data[kubeadmconstants.CACertName],
PrivateKey: apiServerCertificatesSecret.Data[kubeadmconstants.CAKeyName],
},
config,
)
if err != nil {
logger.Error(err, "cannot create a valid kubeconfig")
var shouldCreate bool

return err
shouldCreate = shouldCreate || r.resource.Data == nil // Missing data key
shouldCreate = shouldCreate || len(r.resource.Data) == 0 // Missing data key
shouldCreate = shouldCreate || len(r.resource.Data[r.KubeConfigFileName]) == 0 // Missing kubeconfig file, must be generated
shouldCreate = shouldCreate || !kubeadm.IsKubeconfigValid(r.resource.Data[r.KubeConfigFileName]) // invalid kubeconfig
shouldCreate = shouldCreate || status.Checksum != checksum || len(r.resource.UID) == 0 // Wrong checksum

if shouldCreate {
crtKeyPair := kubeadm.CertificatePrivateKeyPair{
Certificate: caCertificatesSecret.Data[kubeadmconstants.CACertName],
PrivateKey: caCertificatesSecret.Data[kubeadmconstants.CAKeyName],
}

kubeconfig, kcErr := kubeadm.CreateKubeconfig(r.KubeConfigFileName, crtKeyPair, config)
if kcErr != nil {
logger.Error(kcErr, "cannot shouldCreate a valid kubeconfig")

return kcErr
}

r.resource.Data = map[string][]byte{
r.KubeConfigFileName: kubeconfig,
}
@@ -4,5 +4,5 @@
package upgrade

const (
KubeadmVersion = "v1.27.0"
KubeadmVersion = "v1.27.3"
)
internal/webhook/handlers/tcp_name.go: new file (40 lines)
@@ -0,0 +1,40 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package handlers

import (
"context"
"fmt"
"strings"

"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/webhook/utils"
)

type TenantControlPlaneName struct{}

func (t TenantControlPlaneName) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert

if errs := validation.IsDNS1035Label(tcp.Name); len(errs) > 0 {
return nil, fmt.Errorf("the provided name is invalid, %s", strings.Join(errs, ","))
}

return nil, nil
}
}

func (t TenantControlPlaneName) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}

func (t TenantControlPlaneName) OnUpdate(runtime.Object, runtime.Object) AdmissionResponse {
return utils.NilOp()
}
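The new `TenantControlPlaneName` handler rejects TenantControlPlane names that are not valid DNS-1035 labels. A quick stand-alone illustration of what `validation.IsDNS1035Label` accepts and rejects (sample names are hypothetical):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, name := range []string{"tenant-00", "0-tenant", "Tenant_A"} {
		if errs := validation.IsDNS1035Label(name); len(errs) > 0 {
			// "0-tenant" and "Tenant_A" fail: a DNS-1035 label must start with a
			// lowercase letter and contain only lowercase alphanumerics and '-'.
			fmt.Println(name, "rejected:", errs)

			continue
		}

		fmt.Println(name, "accepted")
	}
}
```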
@@ -4,9 +4,8 @@
package routes

import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

type TenantControlPlaneMigrate struct{}
@@ -16,5 +15,5 @@ func (t TenantControlPlaneMigrate) GetPath() string {
}

func (t TenantControlPlaneMigrate) GetObject() runtime.Object {
return &kamajiv1alpha1.TenantControlPlane{}
return &corev1.Namespace{}
}