Compare commits

35 Commits

Author SHA1 Message Date
Dario Tranchitella
84c8b1a135 chore(helm): releasing v0.3.0 2023-06-05 17:17:16 +02:00
Dario Tranchitella
7cf930cbe9 chore(kustomize): releasing v0.3.0 2023-06-05 17:17:16 +02:00
Dario Tranchitella
d5e146ef8f test(e2e): webhook validation for additional resources 2023-06-05 17:03:35 +02:00
Dario Tranchitella
cb5fb00d7b refactor(test): renaming tests 2023-06-05 17:03:35 +02:00
Dario Tranchitella
ed00b934ec feat: webhook validation for additional resources 2023-06-05 17:03:35 +02:00
Dario Tranchitella
dbaf3d1915 chore(helm): removing unused datastore webhook 2023-06-05 17:03:35 +02:00
Dario Tranchitella
a625f2218c chore(kustomize): removing unused datastore webhook 2023-06-05 17:03:35 +02:00
Dario Tranchitella
617e802d02 chore(project): webhooks are externally managed from operator-sdk 2023-06-05 17:03:35 +02:00
Dario Tranchitella
eca04893a8 refactor: abstracting webhook management 2023-06-05 17:03:35 +02:00
Dario Tranchitella
14c96b034a refactor(builder): abstracting deployment builders 2023-06-05 17:03:35 +02:00
Dario Tranchitella
f53271cb87 docs(api): container registry settings 2023-06-01 16:05:15 +02:00
Dario Tranchitella
8007fe8cd2 chore(helm): container registry settings 2023-06-01 16:05:15 +02:00
Dario Tranchitella
11d8262c74 chore(kustomize): container registry settings 2023-06-01 16:05:15 +02:00
Dario Tranchitella
877314f53d feat: container registry settings 2023-06-01 16:05:15 +02:00
Dario Tranchitella
27480ba66a feat(api): container registry settings 2023-06-01 16:05:15 +02:00
Dario Tranchitella
d3d18ef836 refactor: removing unused address from control-plane builder 2023-06-01 16:05:15 +02:00
bsctl
c81d190719 docs: improve navigation 2023-05-31 23:30:31 +02:00
Adriano Pezzuto
9284a43860 docs: new diagram of the architecture (#302)
* docs: new diagram of the architecture
2023-05-31 22:34:50 +02:00
Dario Tranchitella
6cab15551f docs: resource claims support 2023-05-30 16:24:18 +02:00
Dario Tranchitella
f0fb8b3c11 chore(helm)!: resource claims support 2023-05-30 16:24:18 +02:00
Dario Tranchitella
778a34a382 chore(kustomize): resource claims support 2023-05-30 16:24:18 +02:00
Dario Tranchitella
25b1c7a8fa feat: resource claims support 2023-05-30 16:24:18 +02:00
Dario Tranchitella
2c6360ad82 feat(api): resource claims support 2023-05-30 16:24:18 +02:00
Dario Tranchitella
523f1cf0e3 chore(kustomize): upgrading controller-gen dependency 2023-05-30 16:24:18 +02:00
Dario Tranchitella
4d6d1461cc chore: upgrading controller-gen dependency 2023-05-30 16:24:18 +02:00
Matteo Ruina
49e016d4da chore(samples): kine and konnectivity tcp examples 2023-05-30 16:00:11 +02:00
Matteo Ruina
b7a2d9da8c docs(api): tcp deployment mangling 2023-05-30 16:00:11 +02:00
Dario Tranchitella
39c7591457 chore(helm): tcp deployment mangling 2023-05-30 16:00:11 +02:00
Matteo Ruina
327438e236 chore(kustomize): tcp deployment mangling 2023-05-30 16:00:11 +02:00
Matteo Ruina
ba4b3eec8f test: tcp deployment mangling 2023-05-30 16:00:11 +02:00
Matteo Ruina
d06affc216 feat: tcp deployment mangling 2023-05-30 16:00:11 +02:00
Matteo Ruina
236540d89f chore(samples): tcp deployment mangling 2023-05-30 16:00:11 +02:00
Matteo Ruina
a5b7605e27 chore(api): tcp deployment mangling 2023-05-30 16:00:11 +02:00
Adriano Pezzuto
3821cf1d67 chore(docs): refactoring documentation and template 2023-05-30 14:31:34 +02:00
Giovanni Toraldo
be1737d908 Fix namespace with previous var 2023-05-11 18:51:14 +02:00
76 changed files with 20768 additions and 1722 deletions

View File

@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.2.3
VERSION ?= 0.3.0
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -85,7 +85,7 @@ kind: ## Download kind locally if necessary.
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2)
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.4)
GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.

View File

@@ -16,10 +16,6 @@ resources:
kind: TenantControlPlane
path: github.com/clastix/kamaji/api/v1alpha1
version: v1alpha1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
domain: clastix.io
@@ -27,8 +23,4 @@ resources:
kind: DataStore
path: github.com/clastix/kamaji/api/v1alpha1
version: v1alpha1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
version: "3"

View File

@@ -9,59 +9,60 @@
</a>
</p>
**Kamaji** deploys and operates **Kubernetes** at scale with a fraction of the operational burden.
![Logo](assets/logo-black.png#gh-light-mode-only)
![Logo](assets/logo-white.png#gh-dark-mode-only)
<p align="center" style="padding: 6px 6px">
<img src="assets/kamaji-logo.png" />
</p>
**Kamaji** deploys and operates **Kubernetes Control Plane** at scale with a fraction of the operational burden. Kamaji is special because the Control Plane components run in a single pod instead of dedicated machines. This makes running multiple Control Planes cheaper and easier to deploy and operate.
## Why are we building it?
Global hyper-scalers are leading the Managed Kubernetes space, while other cloud providers, as well as large corporations, struggle to offer the same experience to their DevOps teams because they lack the right tools. Moreover, current Kubernetes solutions are mainly designed with an enterprise-first approach and are too costly when deployed at scale.
**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Planes of _“tenant clusters”_ are just regular pods instead of dedicated Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate.
![Architecture](docs/content/images/kamaji-light.png#gh-light-mode-only)
![Architecture](docs/content/images/kamaji-dark.png#gh-dark-mode-only)
## Getting started
Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-started/) to deploy a minimal setup of Kamaji on KinD.
<img src="docs/content/images/architecture.png" width="600">
## Features
- **Self Service Kubernetes:** give users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
- **Multi-cluster Management:** centrally manage multiple clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** let users access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full APIs compliant:** tenant clusters are fully CNCF compliant built with upstream Kubernetes binaries. A user does not see differences between a Kamaji provisioned cluster and a dedicated cluster.
- **Fully API compliant:** all clusters are CNCF compliant and built with upstream Kubernetes binaries.
## Roadmap
- [x] Benchmarking
- [ ] Stress-test
- [x] Support for dynamic address allocation on native Load Balancer
- [x] Dynamic address on Load Balancer
- [x] Zero Downtime Tenant Control Plane upgrade
- [x] `konnectivity` integration
- [ ] Provisioning of Tenant Control Plane through Cluster APIs
- [x] Join worker nodes from anywhere
- [x] Alternative datastore MySQL and PostgreSQL
- [x] Pool of multiple datastores
- [x] Seamless migration between datastores
- [ ] Automatic assignment to a datastore
- [ ] Autoscaling of Tenant Control Plane
- [ ] Provisioning through Cluster APIs
- [ ] Terraform provider
- [ ] Custom Prometheus metrics for monitoring and alerting
- [x] `kine` integration for MySQL as datastore
- [x] `kine` integration for PostgreSQL as datastore
- [x] Pool of multiple datastores
- [x] Seamless migration between datastores with the same driver
- [ ] Automatic assignment of Tenant Control Planes to a datastore
- [ ] Autoscaling of Tenant Control Plane pods
## Documentation
Please check the project's [documentation](https://kamaji.clastix.io/) to get started with Kamaji.
## Contributions
Kamaji is Open Source with Apache 2 license and any contribution is welcome.
Kamaji is open source under the Apache 2 license, and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own servers. The unchanged upstream images of `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While that is often the right trade-off between features and ease of use, there are cases where a tenant requires access to the control plane, for example, to manage CRDs on their own. With Kamaji, you can provide cluster admin permissions to the tenant.
Q. Well, you convinced me, how can I give it a try?
A. You can get started with Kamaji on a laptop with [KinD](getting-started.md) installed.

View File

@@ -1,57 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"context"
"fmt"
"strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
//+kubebuilder:webhook:path=/validate--v1-secret,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=secrets,verbs=delete,versions=v1,name=vdatastoresecrets.kb.io,admissionReviewVersions=v1
type dataStoreSecretValidator struct {
log logr.Logger
client client.Client
}
func (d *dataStoreSecretValidator) ValidateCreate(context.Context, runtime.Object) error {
return nil
}
func (d *dataStoreSecretValidator) ValidateUpdate(context.Context, runtime.Object, runtime.Object) error {
return nil
}
func (d *dataStoreSecretValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
secret := obj.(*corev1.Secret) //nolint:forcetypeassert
dsList := &DataStoreList{}
if err := d.client.List(ctx, dsList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(DatastoreUsedSecretNamespacedNameKey, fmt.Sprintf("%s/%s", secret.GetNamespace(), secret.GetName()))}); err != nil {
return err
}
if len(dsList.Items) > 0 {
var res []string
for _, ds := range dsList.Items {
res = append(res, ds.GetName())
}
return fmt.Errorf("the Secret is used by the following kamajiv1alpha1.DataStores and cannot be deleted (%s)", strings.Join(res, ", "))
}
return nil
}
func (d *dataStoreSecretValidator) Default(context.Context, runtime.Object) error {
return nil
}

View File

@@ -1,185 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-datastore,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update,versions=v1alpha1,name=mdatastore.kb.io,admissionReviewVersions=v1
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-datastore,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update;delete,versions=v1alpha1,name=vdatastore.kb.io,admissionReviewVersions=v1
func (in *DataStore) SetupWebhookWithManager(mgr ctrl.Manager) error {
secretValidator := &dataStoreSecretValidator{
log: mgr.GetLogger().WithName("datastore-secret-webhook"),
client: mgr.GetClient(),
}
if err := ctrl.NewWebhookManagedBy(mgr).For(&corev1.Secret{}).WithValidator(secretValidator).Complete(); err != nil {
return err
}
dsValidator := &dataStoreValidator{
log: mgr.GetLogger().WithName("datastore-webhook"),
client: mgr.GetClient(),
}
return ctrl.NewWebhookManagedBy(mgr).
For(in).
WithValidator(dsValidator).
WithDefaulter(dsValidator).
Complete()
}
type dataStoreValidator struct {
log logr.Logger
client client.Client
}
func (d *dataStoreValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
ds, ok := obj.(*DataStore)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
}
if err := d.validate(ctx, ds); err != nil {
return err
}
return nil
}
func (d *dataStoreValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
old, ok := oldObj.(*DataStore)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
}
ds, ok := newObj.(*DataStore)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
}
d.log.Info("validate update", "name", ds.GetName())
if ds.Spec.Driver != old.Spec.Driver {
return fmt.Errorf("driver of a DataStore cannot be changed")
}
if err := d.validate(ctx, ds); err != nil {
return err
}
return nil
}
func (d *dataStoreValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
ds, ok := obj.(*DataStore)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
}
tcpList := &TenantControlPlaneList{}
if err := d.client.List(ctx, tcpList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(TenantControlPlaneUsedDataStoreKey, ds.GetName())}); err != nil {
return err
}
if len(tcpList.Items) > 0 {
return fmt.Errorf("the DataStore is used by multiple TenantControlPlanes and cannot be removed")
}
return nil
}
func (d *dataStoreValidator) Default(context.Context, runtime.Object) error {
return nil
}
func (d *dataStoreValidator) validate(ctx context.Context, ds *DataStore) error {
if ds.Spec.BasicAuth != nil {
if err := d.validateBasicAuth(ctx, ds); err != nil {
return err
}
}
if err := d.validateTLSConfig(ctx, ds); err != nil {
return err
}
return nil
}
func (d *dataStoreValidator) validateBasicAuth(ctx context.Context, ds *DataStore) error {
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Password); err != nil {
return fmt.Errorf("basic-auth password is not valid, %w", err)
}
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Username); err != nil {
return fmt.Errorf("basic-auth username is not valid, %w", err)
}
return nil
}
func (d *dataStoreValidator) validateTLSConfig(ctx context.Context, ds *DataStore) error {
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.CertificateAuthority.Certificate); err != nil {
return fmt.Errorf("CA certificate is not valid, %w", err)
}
if ds.Spec.Driver == EtcdDriver {
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey == nil {
return fmt.Errorf("CA private key is required when using the etcd driver")
}
}
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil {
if err := d.validateContentReference(ctx, *ds.Spec.TLSConfig.CertificateAuthority.PrivateKey); err != nil {
return fmt.Errorf("CA private key is not valid, %w", err)
}
}
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.Certificate); err != nil {
return fmt.Errorf("client certificate is not valid, %w", err)
}
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.PrivateKey); err != nil {
return fmt.Errorf("client private key is not valid, %w", err)
}
return nil
}
func (d *dataStoreValidator) validateContentReference(ctx context.Context, ref ContentRef) error {
switch {
case len(ref.Content) > 0:
return nil
case ref.SecretRef == nil:
return fmt.Errorf("the Secret reference is mandatory when bare content is not specified")
case len(ref.SecretRef.SecretReference.Name) == 0:
return fmt.Errorf("the Secret reference name is mandatory")
case len(ref.SecretRef.SecretReference.Namespace) == 0:
return fmt.Errorf("the Secret reference namespace is mandatory")
}
if err := d.client.Get(ctx, types.NamespacedName{Name: ref.SecretRef.SecretReference.Name, Namespace: ref.SecretRef.SecretReference.Namespace}, &corev1.Secret{}); err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("secret %s/%s is not found", ref.SecretRef.SecretReference.Namespace, ref.SecretRef.SecretReference.Name)
}
return err
}
return nil
}
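
The `validateContentReference` switch above encodes a small decision table: inline content short-circuits every other check; otherwise a Secret reference with both a name and a namespace is mandatory before the Secret is even fetched. Below is a minimal, self-contained sketch of that rule, with simplified stand-in types rather than the real Kamaji API:

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for kamajiv1alpha1.ContentRef and its Secret reference.
type secretReference struct{ Name, Namespace string }

type contentRef struct {
	Content   []byte
	SecretRef *secretReference
}

// checkContentRef mirrors the ordering of validateContentReference:
// bare content wins, then the reference fields are checked one by one.
func checkContentRef(ref contentRef) error {
	switch {
	case len(ref.Content) > 0:
		return nil // inline content provided, no Secret lookup needed
	case ref.SecretRef == nil:
		return errors.New("the Secret reference is mandatory when bare content is not specified")
	case len(ref.SecretRef.Name) == 0:
		return errors.New("the Secret reference name is mandatory")
	case len(ref.SecretRef.Namespace) == 0:
		return errors.New("the Secret reference namespace is mandatory")
	}
	// the real validator then fetches the Secret with the controller-runtime client
	return nil
}

func main() {
	fmt.Println(checkContentRef(contentRef{Content: []byte("inline PEM")}))           // <nil>
	fmt.Println(checkContentRef(contentRef{SecretRef: &secretReference{Name: "ca"}})) // namespace error
}
```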

View File

@@ -0,0 +1,18 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
type RegistrySettings struct {
// +kubebuilder:default="registry.k8s.io"
Registry string `json:"registry,omitempty"`
// The suffix to append to the tag of all the Control Plane container images.
// Optional.
TagSuffix string `json:"tagSuffix,omitempty"`
// +kubebuilder:default="kube-apiserver"
APIServerImage string `json:"apiServerImage,omitempty"`
// +kubebuilder:default="kube-controller-manager"
ControllerManagerImage string `json:"controllerManagerImage,omitempty"`
// +kubebuilder:default="kube-scheduler"
SchedulerImage string `json:"schedulerImage,omitempty"`
}

View File

@@ -0,0 +1,30 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"fmt"
)
func (r *RegistrySettings) buildContainerImage(name, tag string) string {
image := fmt.Sprintf("%s/%s:%s", r.Registry, name, tag)
if len(r.TagSuffix) > 0 {
image += r.TagSuffix
}
return image
}
func (r *RegistrySettings) KubeAPIServerImage(version string) string {
return r.buildContainerImage(r.APIServerImage, version)
}
func (r *RegistrySettings) KubeSchedulerImage(version string) string {
return r.buildContainerImage(r.SchedulerImage, version)
}
func (r *RegistrySettings) KubeControllerManagerImage(version string) string {
return r.buildContainerImage(r.ControllerManagerImage, version)
}
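
Given the kubebuilder defaults above, these helpers compose fully qualified image names, and any `TagSuffix` is appended after the version tag. A quick sketch of the resulting strings; the `-fips` suffix is purely illustrative:

```go
package main

import "fmt"

// buildImage reproduces the composition performed by buildContainerImage above.
func buildImage(registry, name, tag, tagSuffix string) string {
	image := fmt.Sprintf("%s/%s:%s", registry, name, tag)
	if len(tagSuffix) > 0 {
		image += tagSuffix // note: the suffix lands after the version tag
	}
	return image
}

func main() {
	// With the defaults: registry.k8s.io/kube-apiserver:v1.26.0
	fmt.Println(buildImage("registry.k8s.io", "kube-apiserver", "v1.26.0", ""))
	// With a hypothetical suffix: registry.k8s.io/kube-apiserver:v1.26.0-fips
	fmt.Println(buildImage("registry.k8s.io", "kube-apiserver", "v1.26.0", "-fips"))
}
```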

View File

@@ -93,25 +93,20 @@ type IngressSpec struct {
Hostname string `json:"hostname,omitempty"`
}
// ComponentResourceRequirements describes the compute resource requirements.
type ComponentResourceRequirements struct {
// Limits describes the maximum amount of compute resources allowed.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
Limits corev1.ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
// Requests describes the minimum amount of compute resources required.
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
// otherwise to an implementation-defined value.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
Requests corev1.ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
type ControlPlaneComponentsResources struct {
APIServer *ComponentResourceRequirements `json:"apiServer,omitempty"`
ControllerManager *ComponentResourceRequirements `json:"controllerManager,omitempty"`
Scheduler *ComponentResourceRequirements `json:"scheduler,omitempty"`
APIServer *corev1.ResourceRequirements `json:"apiServer,omitempty"`
ControllerManager *corev1.ResourceRequirements `json:"controllerManager,omitempty"`
Scheduler *corev1.ResourceRequirements `json:"scheduler,omitempty"`
// Define the kine container resources.
// Available only if Kamaji is running using Kine as backing storage.
Kine *corev1.ResourceRequirements `json:"kine,omitempty"`
}
type DeploymentSpec struct {
// RegistrySettings allows overriding the default images for the given Tenant Control Plane instance.
// It can be used to point to a different container registry rather than the public one.
// +kubebuilder:default={registry:"registry.k8s.io",apiServerImage:"kube-apiserver",controllerManagerImage:"kube-controller-manager",schedulerImage:"kube-scheduler"}
RegistrySettings RegistrySettings `json:"registrySettings,omitempty"`
// +kubebuilder:default=2
Replicas int32 `json:"replicas,omitempty"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
@@ -146,6 +141,22 @@ type DeploymentSpec struct {
// such as kube-apiserver, controller-manager, and scheduler.
ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
// AdditionalInitContainers allows adding additional init containers to the Control Plane deployment.
AdditionalInitContainers []corev1.Container `json:"additionalInitContainers,omitempty"`
// AdditionalContainers allows adding additional containers to the Control Plane deployment.
AdditionalContainers []corev1.Container `json:"additionalContainers,omitempty"`
// AdditionalVolumes allows adding additional volumes to the Control Plane deployment.
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
// AdditionalVolumeMounts allows mounting an additional volume into each component of the Control Plane
// (kube-apiserver, controller-manager, and scheduler).
AdditionalVolumeMounts *AdditionalVolumeMounts `json:"additionalVolumeMounts,omitempty"`
}
// AdditionalVolumeMounts allows mounting additional volumes to the Control Plane components.
type AdditionalVolumeMounts struct {
APIServer []corev1.VolumeMount `json:"apiServer,omitempty"`
ControllerManager []corev1.VolumeMount `json:"controllerManager,omitempty"`
Scheduler []corev1.VolumeMount `json:"scheduler,omitempty"`
}
// ControlPlaneExtraArgs allows specifying additional arguments to the Control Plane components.
@@ -190,8 +201,8 @@ type KonnectivityServerSpec struct {
// +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
Image string `json:"image,omitempty"`
// Resources define the amount of CPU and memory to allocate to the Konnectivity server.
Resources *ComponentResourceRequirements `json:"resources,omitempty"`
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
}
type KonnectivityAgentSpec struct {
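
The new `Additional*` fields above can also be populated from Go. The following is a minimal sketch, mirroring the additional-volumes sample manifest added later in this changeset; the container, ConfigMap, and mount names come from that sample, and the surrounding TenantControlPlane object is omitted:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

func main() {
	spec := kamajiv1alpha1.DeploymentSpec{
		Replicas: 1,
		// sidecar running next to the Control Plane components
		AdditionalContainers: []corev1.Container{
			{Name: "nginx", Image: "registry.k8s.io/e2e-test-images/nginx:1.15-4"},
		},
		AdditionalVolumes: []corev1.Volume{{
			Name: "api-server-volume",
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: "api-server-extra-cm"},
				},
			},
		}},
		// mount the extra volume only into the kube-apiserver container
		AdditionalVolumeMounts: &kamajiv1alpha1.AdditionalVolumeMounts{
			APIServer: []corev1.VolumeMount{{Name: "api-server-volume", MountPath: "/tmp/api-server"}},
		},
	}
	fmt.Printf("containers=%d volumes=%d\n", len(spec.AdditionalContainers), len(spec.AdditionalVolumes))
}
```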

View File

@@ -1,188 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"context"
"fmt"
"strings"
"github.com/blang/semver"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/clastix/kamaji/internal/upgrade"
)
//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=mtenantcontrolplane.kb.io,admissionReviewVersions=v1
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=vtenantcontrolplane.kb.io,admissionReviewVersions=v1
func (in *TenantControlPlane) SetupWebhookWithManager(mgr ctrl.Manager, datastore string) error {
validator := &tenantControlPlaneValidator{
client: mgr.GetClient(),
defaultDatastore: datastore,
log: mgr.GetLogger().WithName("tenantcontrolplane-webhook"),
}
return ctrl.NewWebhookManagedBy(mgr).
For(in).
WithValidator(validator).
WithDefaulter(validator).
Complete()
}
type tenantControlPlaneValidator struct {
client client.Client
defaultDatastore string
log logr.Logger
}
func (t *tenantControlPlaneValidator) Default(_ context.Context, obj runtime.Object) error {
tcp, ok := obj.(*TenantControlPlane)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
}
if len(tcp.Spec.DataStore) == 0 {
tcp.Spec.DataStore = t.defaultDatastore
}
return nil
}
func (t *tenantControlPlaneValidator) ValidateCreate(_ context.Context, obj runtime.Object) error {
tcp, ok := obj.(*TenantControlPlane)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
}
t.log.Info("validate create", "name", tcp.Name, "namespace", tcp.Namespace)
ver, err := semver.New(t.normalizeKubernetesVersion(tcp.Spec.Kubernetes.Version))
if err != nil {
return errors.Wrap(err, "unable to parse the desired Kubernetes version")
}
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
if supportedErr != nil {
return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
}
if ver.GT(supportedVer) {
return fmt.Errorf("unable to create a TenantControlPlane with a Kubernetes version greater than the supported one, actually %s", supportedVer.String())
}
if err = t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
return err
}
return nil
}
func (t *tenantControlPlaneValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
old, ok := oldObj.(*TenantControlPlane)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
}
tcp, ok := newObj.(*TenantControlPlane)
if !ok {
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
}
t.log.Info("validate update", "name", tcp.Name, "namespace", tcp.Namespace)
if err := t.validateVersionUpdate(old, tcp); err != nil {
return err
}
if err := t.validateDataStore(ctx, old, tcp); err != nil {
return err
}
if err := t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
return err
}
return nil
}
func (t *tenantControlPlaneValidator) ValidateDelete(context.Context, runtime.Object) error {
return nil
}
func (t *tenantControlPlaneValidator) validatePreferredKubeletAddressTypes(addressTypes []KubeletPreferredAddressType) error {
s := sets.NewString()
for _, at := range addressTypes {
if s.Has(string(at)) {
return fmt.Errorf("preferred kubelet address types is stated multiple times: %s", at)
}
s.Insert(string(at))
}
return nil
}
func (t *tenantControlPlaneValidator) validateVersionUpdate(oldObj, newObj *TenantControlPlane) error {
oldVer, oldErr := semver.Make(t.normalizeKubernetesVersion(oldObj.Spec.Kubernetes.Version))
if oldErr != nil {
return errors.Wrap(oldErr, "unable to parse the previous Kubernetes version")
}
newVer, newErr := semver.New(t.normalizeKubernetesVersion(newObj.Spec.Kubernetes.Version))
if newErr != nil {
return errors.Wrap(newErr, "unable to parse the desired Kubernetes version")
}
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
if supportedErr != nil {
return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
}
switch {
case newVer.GT(supportedVer):
return fmt.Errorf("unable to upgrade to a version greater than the supported one, actually %s", supportedVer.String())
case newVer.LT(oldVer):
return fmt.Errorf("unable to downgrade a TenantControlPlane from %s to %s", oldVer.String(), newVer.String())
case newVer.Minor-oldVer.Minor > 1:
return fmt.Errorf("unable to upgrade to a minor version in a non-sequential mode")
}
return nil
}
func (t *tenantControlPlaneValidator) validateDataStore(ctx context.Context, oldObj, tcp *TenantControlPlane) error {
if oldObj.Spec.DataStore == tcp.Spec.DataStore {
return nil
}
previousDatastore, desiredDatastore := &DataStore{}, &DataStore{}
if err := t.client.Get(ctx, types.NamespacedName{Name: oldObj.Spec.DataStore}, previousDatastore); err != nil {
return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
}
if err := t.client.Get(ctx, types.NamespacedName{Name: tcp.Spec.DataStore}, desiredDatastore); err != nil {
return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
}
if previousDatastore.Spec.Driver != desiredDatastore.Spec.Driver {
return fmt.Errorf("migration between different Datastore drivers is not supported")
}
return nil
}
func (t *tenantControlPlaneValidator) normalizeKubernetesVersion(input string) string {
if strings.HasPrefix(input, "v") {
return strings.Replace(input, "v", "", 1)
}
return input
}
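
The version checks above boil down to three rules: never exceed the kubeadm version bundled with Kamaji, never downgrade, and never skip a minor release. Below is a self-contained sketch of the last two rules using the same `blang/semver` library; the supported-version check is left out since it depends on the bundled kubeadm:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver"
)

// allowedUpgrade mirrors the downgrade and minor-skew rules of
// validateVersionUpdate, after the same leading-"v" normalization.
func allowedUpgrade(oldVersion, newVersion string) error {
	oldVer, err := semver.Make(strings.TrimPrefix(oldVersion, "v"))
	if err != nil {
		return err
	}
	newVer, err := semver.Make(strings.TrimPrefix(newVersion, "v"))
	if err != nil {
		return err
	}
	switch {
	case newVer.LT(oldVer):
		return fmt.Errorf("unable to downgrade a TenantControlPlane from %s to %s", oldVer, newVer)
	case newVer.Minor-oldVer.Minor > 1:
		return fmt.Errorf("unable to upgrade to a minor version in a non-sequential mode")
	}
	return nil
}

func main() {
	fmt.Println(allowedUpgrade("v1.25.2", "v1.26.0")) // <nil>: sequential minor bump
	fmt.Println(allowedUpgrade("v1.24.0", "v1.26.0")) // error: skips v1.25
}
```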

View File

@@ -1,123 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"context"
"crypto/tls"
"fmt"
"net"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
cfg *rest.Config
k8sClient client.Client
testEnv *envtest.Environment
ctx context.Context
cancel context.CancelFunc
)
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Webhook Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: false,
WebhookInstallOptions: envtest.WebhookInstallOptions{
Paths: []string{filepath.Join("..", "..", "config", "webhook")},
},
}
var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
scheme := runtime.NewScheme()
err = AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = admissionv1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
// start webhook server using Manager
webhookInstallOptions := &testEnv.WebhookInstallOptions
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
Host: webhookInstallOptions.LocalServingHost,
Port: webhookInstallOptions.LocalServingPort,
CertDir: webhookInstallOptions.LocalServingCertDir,
LeaderElection: false,
MetricsBindAddress: "0",
})
Expect(err).NotTo(HaveOccurred())
err = (&TenantControlPlane{}).SetupWebhookWithManager(mgr, "")
Expect(err).NotTo(HaveOccurred())
err = (&DataStore{}).SetupWebhookWithManager(mgr)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:webhook
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred())
}()
// wait for the webhook server to get ready
dialer := &net.Dialer{Timeout: time.Second}
addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
Eventually(func() error {
conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
if err != nil {
return err
}
conn.Close()
return nil
}).Should(Succeed())
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -10,7 +10,7 @@ package v1alpha1
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -58,6 +58,42 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdditionalVolumeMounts) DeepCopyInto(out *AdditionalVolumeMounts) {
*out = *in
if in.APIServer != nil {
in, out := &in.APIServer, &out.APIServer
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ControllerManager != nil {
in, out := &in.ControllerManager, &out.ControllerManager
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Scheduler != nil {
in, out := &in.Scheduler, &out.Scheduler
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolumeMounts.
func (in *AdditionalVolumeMounts) DeepCopy() *AdditionalVolumeMounts {
if in == nil {
return nil
}
out := new(AdditionalVolumeMounts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) {
*out = *in
@@ -254,35 +290,6 @@ func (in *ClientCertificate) DeepCopy() *ClientCertificate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentResourceRequirements) DeepCopyInto(out *ComponentResourceRequirements) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResourceRequirements.
func (in *ComponentResourceRequirements) DeepCopy() *ComponentResourceRequirements {
if in == nil {
return nil
}
out := new(ComponentResourceRequirements)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContentRef) DeepCopyInto(out *ContentRef) {
*out = *in
@@ -335,17 +342,22 @@ func (in *ControlPlaneComponentsResources) DeepCopyInto(out *ControlPlaneCompone
*out = *in
if in.APIServer != nil {
in, out := &in.APIServer, &out.APIServer
*out = new(ComponentResourceRequirements)
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.ControllerManager != nil {
in, out := &in.ControllerManager, &out.ControllerManager
*out = new(ComponentResourceRequirements)
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.Scheduler != nil {
in, out := &in.Scheduler, &out.Scheduler
*out = new(ComponentResourceRequirements)
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.Kine != nil {
in, out := &in.Kine, &out.Kine
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
}
@@ -547,9 +559,25 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatastoreUsedSecret) DeepCopyInto(out *DatastoreUsedSecret) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatastoreUsedSecret.
func (in *DatastoreUsedSecret) DeepCopy() *DatastoreUsedSecret {
if in == nil {
return nil
}
out := new(DatastoreUsedSecret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
out.RegistrySettings = in.RegistrySettings
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -557,6 +585,7 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
(*out)[key] = val
}
}
in.Strategy.DeepCopyInto(&out.Strategy)
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
@@ -587,6 +616,32 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
(*in).DeepCopyInto(*out)
}
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
if in.AdditionalInitContainers != nil {
in, out := &in.AdditionalInitContainers, &out.AdditionalInitContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalContainers != nil {
in, out := &in.AdditionalContainers, &out.AdditionalContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalVolumes != nil {
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalVolumeMounts != nil {
in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts
*out = new(AdditionalVolumeMounts)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
@@ -757,7 +812,7 @@ func (in *KonnectivityServerSpec) DeepCopyInto(out *KonnectivityServerSpec) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ComponentResourceRequirements)
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.ExtraArgs != nil {
@@ -901,6 +956,11 @@ func (in *KubeconfigsStatus) DeepCopy() *KubeconfigsStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletSpec) DeepCopyInto(out *KubeletSpec) {
*out = *in
if in.PreferredAddressTypes != nil {
in, out := &in.PreferredAddressTypes, &out.PreferredAddressTypes
*out = make([]KubeletPreferredAddressType, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletSpec.
@@ -965,7 +1025,7 @@ func (in *KubernetesServiceStatus) DeepCopy() *KubernetesServiceStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesSpec) DeepCopyInto(out *KubernetesSpec) {
*out = *in
out.Kubelet = in.Kubelet
in.Kubelet.DeepCopyInto(&out.Kubelet)
if in.AdmissionControllers != nil {
in, out := &in.AdmissionControllers, &out.AdmissionControllers
*out = make(AdmissionControllers, len(*in))
@@ -1067,6 +1127,21 @@ func (in *PublicKeyPrivateKeyPairStatus) DeepCopy() *PublicKeyPrivateKeyPairStat
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistrySettings) DeepCopyInto(out *RegistrySettings) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySettings.
func (in *RegistrySettings) DeepCopy() *RegistrySettings {
if in == nil {
return nil
}
out := new(RegistrySettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
@@ -1233,3 +1308,18 @@ func (in *TenantControlPlaneStatus) DeepCopy() *TenantControlPlaneStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantControlPlaneStatusDataStore) DeepCopyInto(out *TenantControlPlaneStatusDataStore) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantControlPlaneStatusDataStore.
func (in *TenantControlPlaneStatusDataStore) DeepCopy() *TenantControlPlaneStatusDataStore {
if in == nil {
return nil
}
out := new(TenantControlPlaneStatusDataStore)
in.DeepCopyInto(out)
return out
}

Binary file not shown (before: 12 KiB).

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" role="img" viewBox="11.85 8.10 202.80 187.55"><title>Kamaji</title><path d="M32.1 13.7c-2.4.9-6.3 3.5-8.6 5.8-7.7 7.7-7.5 5-7.5 82.5 0 77.4-.2 74.8 7.5 82.5 7.7 7.8 4.2 7.5 90 7.5s82.3.3 90-7.5c7.7-7.7 7.5-5.1 7.5-82.5s.2-74.8-7.5-82.5c-7.8-7.8-4.1-7.5-90.4-7.4-66.7 0-77.2.3-81 1.6zm160.5 9.9c1.9.9 4.4 3.1 5.7 4.8l2.2 3.1v141l-2.2 3.1c-4.8 6.7-1.1 6.4-84.8 6.4s-80 .3-84.8-6.4l-2.2-3.1v-141l2.2-3.1c4.8-6.6.8-6.4 84.6-6.4 68 0 76.3.2 79.3 1.6z"/><path d="M90.1 33.7c-5.1 2.5-7.3 6.7-6.8 13.1.3 4.1 1 5.9 3.3 8.4s2.5 3 .9 2.3c-2-.7-25.1-4.6-29-4.9-1.1 0-2 .5-2 1.4 0 1.1-1.2 1.5-4.9 1.5-6.7 0-6.8 1.9-.4 4 8.2 2.7 9 3.4 3.3 3.5-5.3 0-8.2 1.1-7.1 2.8.7 1.2-2.7 2.2-8.1 2.2-7 0-6.5 2.4 1.1 5.1l3.9 1.4-2.9.5c-4.3.8-3.2 2.3 2.8 4.1l5.3 1.5-5.2 2.7c-8.2 4.2-8.3 5.8-.4 6.1 5.6.2 7.3 1.1 4.2 2.1-2.3.7-2.8 3.1-.9 3.7.7.3-.5 2-2.8 4-5.6 5.3-4 6.4 6.2 4.5 4.4-.8 8.1-1.3 8.3-1.2.2.2-1.3 2.4-3.3 4.8-2 2.4-3.6 4.7-3.6 5.2 0 .4 1.4.5 3 .3 2.9-.4 4 .5 2 1.7-.5.3-1 1.3-1 2.2 0 1.6 2.2 1.5 6.5-.3 1.7-.7 1.6-.2-.9 3-5.4 7.2.7 6.5 13.6-1.4 2.7-1.7 5.1-3 5.4-3 .3 0-.9 2.1-2.7 4.6-4.5 6.6-2.5 7.9 3.7 2.3 4.6-4.3 4.7-4.3 3-1.2-1.9 3.8-2.1 5.6-.4 5.1.6-.2 7.1-7.1 14.3-15.4 7.2-8.2 13.7-14.9 14.5-14.9.8 0 7.3 6.7 14.6 15 7.2 8.2 13.7 15.1 14.3 15.3 1.6.5 1.4-1.4-.5-5-1.6-3.2-1.6-3.2 3.2 1 6 5.1 7.8 4 3.5-2.2-1.8-2.5-3-4.6-2.7-4.6.3 0 2.7 1.3 5.4 3 12.9 7.9 19 8.6 13.6 1.4-2.5-3.2-2.6-3.7-.9-3 5.9 2.5 7.7 1.7 5.6-2.3-.9-1.5-.6-1.7 2-1.3 3.8.6 3.7-.5-.7-5.7-2-2.3-3.5-4.4-3.2-4.6.2-.2 2.1 0 4.3.4 13.9 3 16.4 1.8 9.8-4.3-2.1-1.9-3.2-3.6-2.5-3.6 2 0 1.4-2.8-.9-3.5-3.2-1-1.3-2 4.2-2.1 7.9-.2 7.8-1.9-.4-6.1l-5.2-2.7 5.4-1.6c6.4-1.8 7.9-4 2.9-4.1h-3.3l3.9-1.5c7.3-2.6 8.4-5.4 2.2-5.4-5.1 0-9.6-1.1-9-2.2 1.1-1.7-1.8-2.8-7.1-2.8-5.7-.1-4.9-.8 3.3-3.5 6.4-2.1 6.3-4-.4-4-3.7 0-4.9-.4-4.9-1.5 0-.9-.9-1.4-2-1.4-3.9.3-27 4.2-29 4.9-1.6.7-1.4.2.9-2.3 3.7-4 4.7-11.3 2.2-16.1-4.8-9.2-18.8-9.3-23.8 0-4.4 8.3.2 18.4 9.5 20.5 3 .6 2.8.8-5.5 4l-8.8 3.3-8.7-3.3c-8.1-3.2-8.4-3.4-5.5-4.1 1.7-.3 4.3-1.5 5.7-2.7 13.1-10.3.6-30.4-14.4-23.1zm77.6 98.4c-3.6 2.1-.8 7.7 3.2 6.4 2.1-.6 3.5-3.1 2.5-4.6-1.1-1.8-4-2.7-5.7-1.8zm8.3 3.9c0 1.9.5 2.1 6.3 1.8 4.7-.2 6.2-.7 6.2-1.8s-1.5-1.6-6.2-1.8c-5.8-.3-6.3-.1-6.3 1.8zm-135.6.3c-.2.7-.3 7.4-.2 14.8l.3 13.4 3.3.3c3.1.3 3.2.2 3.2-3.4 0-2.5.7-4.6 2.1-6l2.1-2.3 5 6c3.9 4.7 5.6 5.9 7.8 5.9 1.6 0 3.1-.3 3.3-.8.3-.4-2.1-4-5.4-8.1-3.2-4-5.9-7.6-5.9-8 0-.4 2.5-3.1 5.5-6.1 3-3 5.5-5.8 5.5-6.2 0-.4-1.5-.8-3.3-.8-2.8 0-4.4 1-9.6 6.5-3.5 3.6-6.5 6.5-6.7 6.5-.2 0-.4-2.9-.4-6.5V135h-3c-1.7 0-3.3.6-3.6 1.3zm31.2 7c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 3.3 2.2zm13.4-4l.3 11.3h6l.5-7.8c.5-7.6 1.5-9.6 4.7-9.7 3 0 4.3 3.2 4.3 10.6v7.4h3c3 0 3 0 3-5.9 0-7.3 1.2-10.7 4.1-11.6 3.8-1.3 5.9 2.5 5.9 10.6v6.9h6v-9c0-8.3-.2-9.3-2.5-11.5-2.9-3-9.8-3.5-12.7-.8-1.7 1.5-1.9 1.5-3.6 0-2.2-2-9.2-2.3-11.1-.5-1.1 1-1.4 1-1.8 0-.3-.6-1.8-1.2-3.4-1.2h-3l.3 11.2zm45.4-9.9c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 
3.3 2.2zm13-2.5c-.3 12.8-.3 12.8-2.7 12.8-1.5 0-2.7.8-3.1 2-2 5.4 9.4 4.3 11.9-1.2.6-1.3 1.1-7.7 1.1-14.3v-12h-6.9l-.3 12.7zm13.4-1.5l.3 11.3h6v-22l-3.3-.3-3.3-.3.3 11.3z"/></svg>

Before: 3.6 KiB

BIN assets/logo-black.png (new file; binary not shown; after: 8.7 KiB)

BIN assets/logo-white.png (new file; binary not shown; after: 11 KiB)

Binary file not shown (before: 119 KiB).

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v0.2.3
appVersion: v0.3.0
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
@@ -17,7 +17,7 @@ name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.11.5
version: 0.12.0
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji

View File

@@ -1,6 +1,6 @@
# kamaji
![Version: 0.11.5](https://img.shields.io/badge/Version-0.11.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.2.3](https://img.shields.io/badge/AppVersion-v0.2.3-informational?style=flat-square)
![Version: 0.12.0](https://img.shields.io/badge/Version-0.12.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.0](https://img.shields.io/badge/AppVersion-v0.3.0-informational?style=flat-square)
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.

View File

@@ -4,7 +4,7 @@ kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
controller-gen.kubebuilder.io/version: v0.9.2
controller-gen.kubebuilder.io/version: v0.11.4
name: datastores.kamaji.clastix.io
spec:
group: kamaji.clastix.io

File diff suppressed because it is too large.

View File

@@ -8,26 +8,6 @@ metadata:
{{- include "kamaji.labels" $data | nindent 4 }}
name: kamaji-mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: {{ include "kamaji.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /mutate-kamaji-clastix-io-v1alpha1-datastore
failurePolicy: Fail
name: mdatastore.kb.io
rules:
- apiGroups:
- kamaji.clastix.io
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- datastores
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:

View File

@@ -23,8 +23,11 @@ import (
"github.com/clastix/kamaji/controllers"
"github.com/clastix/kamaji/controllers/soot"
"github.com/clastix/kamaji/internal"
"github.com/clastix/kamaji/internal/builders/controlplane"
datastoreutils "github.com/clastix/kamaji/internal/datastore/utils"
"github.com/clastix/kamaji/internal/webhook"
"github.com/clastix/kamaji/internal/webhook/handlers"
"github.com/clastix/kamaji/internal/webhook/routes"
)
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
@@ -126,12 +129,6 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
return err
}
if err = (&webhook.Freeze{}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to register webhook", "webhook", "Freeze")
return err
}
if err = (&kamajiv1alpha1.DatastoreUsedSecret{}).SetupWithManager(ctx, mgr); err != nil {
setupLog.Error(err, "unable to create indexer", "indexer", "DatastoreUsedSecret")
@@ -144,13 +141,37 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
return err
}
if err = (&kamajiv1alpha1.TenantControlPlane{}).SetupWebhookWithManager(mgr, datastore); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "TenantControlPlane")
return err
}
if err = (&kamajiv1alpha1.DataStore{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "DataStore")
err = webhook.Register(mgr, map[routes.Route][]handlers.Handler{
routes.TenantControlPlaneMigrate{}: {
handlers.Freeze{},
},
routes.TenantControlPlaneDefaults{}: {
handlers.TenantControlPlaneDefaults{DefaultDatastore: datastore},
},
routes.TenantControlPlaneValidate{}: {
handlers.TenantControlPlaneVersion{},
handlers.TenantControlPlaneKubeletAddresses{},
handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
handlers.TenantControlPlaneDeployment{
Client: mgr.GetClient(),
DeploymentBuilder: controlplane.Deployment{
Client: mgr.GetClient(),
KineContainerImage: kineImage,
},
KonnectivityBuilder: controlplane.Konnectivity{
Scheme: *mgr.GetScheme(),
},
},
},
routes.DataStoreValidate{}: {
handlers.DataStoreValidation{Client: mgr.GetClient()},
},
routes.DataStoreSecrets{}: {
handlers.DataStoreSecretValidation{Client: mgr.GetClient()},
},
})
if err != nil {
setupLog.Error(err, "unable to create webhook")
return err
}
@@ -187,6 +208,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
return nil
},
}
// Setting zap logger
zapfs := flag.NewFlagSet("zap", flag.ExitOnError)
opts := zap.Options{

View File

@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.11.4
name: datastores.kamaji.clastix.io
spec:
group: kamaji.clastix.io

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: clastix/kamaji
newTag: v0.2.3
newTag: v0.3.0

View File

@@ -2,7 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:

View File

@@ -0,0 +1,30 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: additionalcontainers
spec:
dataStore: postgresql-bronze
controlPlane:
deployment:
replicas: 1
additionalInitContainers:
- name: init
image: registry.k8s.io/e2e-test-images/busybox:1.29-4
command:
- /bin/sh
- -c
- echo hello world
additionalContainers:
- name: nginx
image: registry.k8s.io/e2e-test-images/nginx:1.15-4
service:
serviceType: LoadBalancer
kubernetes:
version: "v1.26.0"
kubelet:
cgroupfs: systemd
networkProfile:
port: 6443
addons:
coreDNS: {}
kubeProxy: {}

View File

@@ -0,0 +1,60 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: additional-volumes
spec:
controlPlane:
deployment:
replicas: 1
additionalVolumes:
- name: api-server-volume
configMap:
name: api-server-extra-cm
- name: controller-manager-volume
configMap:
name: controller-manager-extra-cm
- name: scheduler-volume
configMap:
name: scheduler-extra-cm
additionalVolumeMounts:
apiServer:
- name: api-server-volume
mountPath: "/tmp/api-server"
controllerManager:
- name: controller-manager-volume
mountPath: "/tmp/controller-manager"
scheduler:
- name: scheduler-volume
mountPath: "/tmp/scheduler"
service:
serviceType: LoadBalancer
kubernetes:
version: "v1.26.0"
kubelet:
cgroupfs: systemd
networkProfile:
port: 6443
addons:
coreDNS: {}
kubeProxy: {}
---
apiVersion: v1
data:
api-server: "This is an API Server volume"
kind: ConfigMap
metadata:
name: api-server-extra-cm
---
apiVersion: v1
data:
controller-manager: "This is a Controller Manager volume"
kind: ConfigMap
metadata:
name: controller-manager-extra-cm
---
apiVersion: v1
data:
scheduler: "This is a Scheduler volume"
kind: ConfigMap
metadata:
name: scheduler-extra-cm

View File

@@ -0,0 +1,18 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: kine
spec:
addons:
coreDNS: {}
kubeProxy: {}
controlPlane:
deployment:
replicas: 1
service:
serviceType: LoadBalancer
dataStore: postgresql-bronze
kubernetes:
kubelet:
cgroupfs: systemd
version: v1.26.0

View File

@@ -0,0 +1,21 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: konnectivity-addon
spec:
deployment:
replicas: 2
service:
serviceType: LoadBalancer
kubernetes:
version: "v1.26.0"
kubelet:
cgroupfs: systemd
networkProfile:
port: 6443
addons:
coreDNS: {}
kubeProxy: {}
konnectivity:
server:
port: 8132

View File

@@ -2,29 +2,8 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-kamaji-clastix-io-v1alpha1-datastore
failurePolicy: Fail
name: mdatastore.kb.io
rules:
- apiGroups:
- kamaji.clastix.io
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- datastores
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
@@ -49,7 +28,6 @@ webhooks:
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@@ -14,6 +14,7 @@ import (
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/controllers/finalizers"
builder "github.com/clastix/kamaji/internal/builders/controlplane"
"github.com/clastix/kamaji/internal/datastore"
"github.com/clastix/kamaji/internal/resources"
ds "github.com/clastix/kamaji/internal/resources/datastore"
@@ -245,7 +246,7 @@ func getKonnectivityServerRequirementsResources(c client.Client) []resources.Res
func getKonnectivityServerPatchResources(c client.Client) []resources.Resource {
return []resources.Resource{
&konnectivity.KubernetesDeploymentResource{Client: c},
&konnectivity.KubernetesDeploymentResource{Builder: builder.Konnectivity{Scheme: *c.Scheme()}, Client: c},
&konnectivity.ServiceResource{Client: c},
}
}

View File

@@ -1,11 +1,12 @@
# Setup Kamaji on a generic infrastructure
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure, either virtual or bare metal.
# Getting started with Kamaji
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure.
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
!!! warning ""
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
The guide requires:
- one bootstrap workstation
- a bootstrap machine
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
@@ -13,19 +14,20 @@ The guide requires:
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install Cert Manager](#install-cert-manager)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
## Prepare the bootstrap workspace
This guide is supposed to be run from a remote or local bootstrap machine. First, clone the repo and prepare the workspace directory:
On the bootstrap machine, clone the repo and prepare the workspace directory:
```bash
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
We assume you have installed on your workstation:
We assume you have installed on the bootstrap machine:
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
@@ -33,7 +35,7 @@ We assume you have installed on your workstation:
- [jq](https://stedolan.github.io/jq/)
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters.
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and hosts monitoring, logging, and governance of the Kamaji setup, including all Tenant clusters.
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your environment:
@@ -45,7 +47,7 @@ Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or a Cloud based controller.
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster and check you can access:
@@ -54,11 +56,24 @@ Make sure you have a `kubeconfig` file with admin permissions on the cluster you
kubectl cluster-info
```
## Install Cert Manager
Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) through validating and mutating webhook configurations. These webhooks are secured via TLS, and their certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be installed:
```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.11.0 \
--set installCRDs=true
```
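Before moving on, you can check that the cert-manager pods are up and ready (plain `kubectl` commands, nothing Kamaji-specific):
```bash
kubectl -n cert-manager get pods
kubectl -n cert-manager wait --for=condition=Ready pods --all --timeout=120s
```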
## Install Kamaji Controller
Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) through validating and mutating webhook configurations. These webhooks are secured via TLS, and their certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
The Kamaji controller needs to access a default datastore in order to save the tenants' cluster data. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` out of the box.
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save the tenants' cluster data. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` as datastore, out of the box.
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
@@ -68,7 +83,8 @@ helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a different storage system, such as `MySQL`- or `PostgreSQL`-compatible databases, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
!!! note "A managed datastore is highly recommended in production"
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to set up a multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, such as `MySQL`- or `PostgreSQL`-compatible databases, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
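As a sketch only (the release name and namespace below are assumptions; refer to the kamaji-etcd project for the authoritative instructions), installing a managed `etcd` from the same `clastix` Helm repository could look like:
```bash
# Hypothetical example: deploy a managed, multi-tenant etcd via kamaji-etcd.
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```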
## Create Tenant Cluster
@@ -150,7 +166,7 @@ kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.y
After a few seconds, check the created resources in the tenant namespace; when ready, the output will look similar to the following:
```command
kubectl -n tenants get tcp,deploy,pods,svc
kubectl -n ${TENANT_NAMESPACE} get tcp,deploy,pods,svc
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
tenantcontrolplane/tenant-00 v1.25.2 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig default 2m20s
@@ -224,7 +240,10 @@ And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with the Cluster API and other tools, for example, Terraform.
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`.
!!! note "Cluster API support"
In the future, we'll support creating tenant clusters through the Cluster API.
You can use the provided helper script `/deploy/nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
@@ -232,7 +251,8 @@ You can use the provided helper script `/deploy/nodes-prerequisites.sh`, in orde
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
> Warning: the script assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
!!! warning ""
The provided script is just a convenience: it assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
Run the script:

View File

@@ -1,191 +0,0 @@
# Getting started with Tenant Control Planes
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development purposes. Please refer to the [Kamaji documentation](concepts.md) to understand the terms used in this guide, for example: `admin cluster`, `tenant cluster`, and `tenant control plane`.
## Pre-requisites
We assume you have installed on your workstation:
- [Docker](https://docker.com)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [Helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl/cfssljson](https://github.com/cloudflare/cfssl)
> Starting from Kamaji v0.1.0, `kubectl` and `kubeadm` must be at least version `v1.25.0`, due to the changes regarding the `kubelet-config` ConfigMap required for the node join.
## Setup Kamaji on KinD
The Kamaji instance is made of a single node hosting:
- admin control-plane
- admin worker
- multi-tenant datastore
### Standard Installation
You can install your KinD cluster, an `etcd`-based multi-tenant datastore, and the Kamaji operator with a **single command**:
```bash
$ make -C deploy/kind
```
Now you can deploy a [`TenantControlPlane`](#deploy-tenant-control-plane).
### Installation with alternative datastore drivers
Kamaji offers the possibility of using a different storage system than `etcd` as its datastore, such as `MySQL`- or `PostgreSQL`-compatible databases.
First, setup a KinD cluster and the other requirements:
```bash
$ make -C deploy/kind reqs
```
Install one of the alternative supported databases:
- **MySQL**: install it with the command:
`$ make -C deploy/kine/mysql mariadb`
- **PostgreSQL**: install it with the command:
`$ make -C deploy/kine/postgresql postgresql`
Then use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
For example, with a PostgreSQL datastore:
```bash
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
--set etcd.deploy=false \
--set datastore.driver=PostgreSQL \
--set datastore.endpoints[0]=postgres-default-rw.kamaji-system.svc:5432 \
--set datastore.basicAuth.usernameSecret.name=postgres-default-superuser \
--set datastore.basicAuth.usernameSecret.namespace=kamaji-system \
--set datastore.basicAuth.usernameSecret.keyPath=username \
--set datastore.basicAuth.passwordSecret.name=postgres-default-superuser \
--set datastore.basicAuth.passwordSecret.namespace=kamaji-system \
--set datastore.basicAuth.passwordSecret.keyPath=password \
--set datastore.tlsConfig.certificateAuthority.certificate.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.certificate.keyPath=ca.crt \
--set datastore.tlsConfig.certificateAuthority.privateKey.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.privateKey.keyPath=ca.key \
--set datastore.tlsConfig.clientCertificate.certificate.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.certificate.keyPath=tls.crt \
--set datastore.tlsConfig.clientCertificate.privateKey.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
```
### Deploy Tenant Control Plane
Now it's time to deploy your first tenant control plane.
```bash
$ kubectl apply -f - <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: tenant1
spec:
controlPlane:
deployment:
replicas: 2
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: deployment
service:
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: service
serviceType: NodePort
kubernetes:
version: "v1.23.4"
kubelet:
cgroupfs: cgroupfs
admissionControllers:
- LimitRanger
- ResourceQuota
networkProfile:
address: "172.18.0.2"
port: 31443
certSANs:
- "test.clastixlabs.io"
serviceCidr: "10.96.0.0/16"
podCidr: "10.244.0.0/16"
dnsServiceIPs:
- "10.96.0.10"
addons:
coreDNS: {}
kubeProxy: {}
EOF
```
> Check the networkProfile fields according to your installation.
> To let Kamaji work in KinD, you have to indicate that the service must be of type [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport).
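If you are unsure which address to set in `networkProfile.address`, the KinD node's internal IP can be read with a standard `kubectl` query (a generic command, not specific to Kamaji):
```bash
kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}'
```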
### Get the kubeconfig
Let's retrieve the kubeconfig and store it in `/tmp/kubeconfig`:
```bash
$ kubectl get secrets tenant1-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d > /tmp/kubeconfig
```
You can export it to facilitate the next tasks:
```bash
$ export KUBECONFIG=/tmp/kubeconfig
```
### Install CNI
We highly recommend installing [kindnet](https://github.com/aojea/kindnet) as the CNI for your Kamaji TCP.
```bash
$ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/install-kindnet.yaml
```
### Join worker nodes
```bash
$ make -C deploy/kind kamaji-kind-worker-join
```
> To add more worker nodes, run the command above again.
Check out the node:
```bash
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
d2d4b468c9de Ready <none> 44s v1.23.4
```
> For more complex scenarios (exposing ports, using different versions, and so on), run `join-node.bash`.
The tenant control plane has now been provisioned in a minimal Kamaji setup based on KinD. You can develop, test, and run your own experiments with Kamaji.
## Cleanup
```bash
$ make -C deploy/kind destroy
```

View File

@@ -1,34 +0,0 @@
# Install Kamaji
## Quickstart
### Pre-requisites
- [Helm](https://helm.sh/docs/intro/install/)
- Kubernetes cluster
### Install cert-manager
```shell
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.11.0 \
--set installCRDs=true
```
### Install Kamaji with default datastore
```
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
Now you're ready to go with Kamaji!
Please follow the documentation to start playing with it.

View File

@@ -0,0 +1,63 @@
# Use alternative datastores
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [PostgreSQL](https://www.postgresql.org/).
## Install the datastore
On the admin cluster, install one of the alternative supported datastores:
- **MySQL**: install it with the command:
`$ make -C deploy/kine/mysql mariadb`
- **PostgreSQL**: install it with the command:
`$ make -C deploy/kine/postgresql postgresql`
## Install Cert Manager
As a prerequisite for Kamaji, install Cert Manager:
```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.11.0 \
--set installCRDs=true
```
## Install Kamaji
Use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
For example, with a PostgreSQL datastore installed:
```bash
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
--set etcd.deploy=false \
--set datastore.driver=PostgreSQL \
--set datastore.endpoints[0]=postgres-default-rw.kamaji-system.svc:5432 \
--set datastore.basicAuth.usernameSecret.name=postgres-default-superuser \
--set datastore.basicAuth.usernameSecret.namespace=kamaji-system \
--set datastore.basicAuth.usernameSecret.keyPath=username \
--set datastore.basicAuth.passwordSecret.name=postgres-default-superuser \
--set datastore.basicAuth.passwordSecret.namespace=kamaji-system \
--set datastore.basicAuth.passwordSecret.keyPath=password \
--set datastore.tlsConfig.certificateAuthority.certificate.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.certificate.keyPath=ca.crt \
--set datastore.tlsConfig.certificateAuthority.privateKey.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.privateKey.keyPath=ca.key \
--set datastore.tlsConfig.clientCertificate.certificate.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.certificate.keyPath=tls.crt \
--set datastore.tlsConfig.clientCertificate.privateKey.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
```
Once installed, you will be able to create Tenant Control Planes using an alternative datastore.
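For example, a minimal `TenantControlPlane` pointing at an alternative datastore could look like the following sketch; the `postgresql-bronze` datastore name mirrors the kine sample shipped in this repository, so adjust it to the name of your own `DataStore` resource:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-pg   # hypothetical name
spec:
  dataStore: postgresql-bronze   # your kine-backed DataStore
  controlPlane:
    deployment:
      replicas: 1
    service:
      serviceType: LoadBalancer
  kubernetes:
    version: v1.26.0
    kubelet:
      cgroupfs: systemd
  addons:
    coreDNS: {}
    kubeProxy: {}
```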

View File

@@ -1,31 +1,33 @@
# Setup Kamaji on Azure
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
!!! warning ""
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
The guide requires:
- one bootstrap workstation
- an AKS Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of Azure virtual machines to host `Tenant`s' workloads
- a bootstrap machine
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
## Summary
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install Cert Manager](#install-cert-manager)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
## Prepare the bootstrap workspace
This guide is supposed to be run from a remote or local bootstrap machine. First, clone the repo and prepare the workspace directory:
On the bootstrap machine, clone the repo and prepare the workspace directory:
```bash
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
We assume you have installed on your workstation:
We assume you have installed on the bootstrap machine:
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
@@ -39,10 +41,10 @@ Make sure you have a valid Azure subscription, and login to Azure:
az account set --subscription "MySubscription"
az login
```
> Currently, the Kamaji setup, including Admin and Tenant clusters, needs to be deployed within the same Azure region. Cross-region deployments are not supported.
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters. For this guide, we're going to use an instance of Azure Kubernetes Service - AKS as the Admin Cluster.
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and implements monitoring, logging, and governance of the whole Kamaji setup, including all Tenant clusters. For this guide, we're going to use an instance of Azure Kubernetes Service (AKS) as Admin Cluster.
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own Azure environment:
@@ -95,11 +97,24 @@ And check you can access:
kubectl cluster-info
```
## Install Cert Manager
Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) through validating and mutating webhook configurations. These webhooks are secured via TLS, and their certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be installed:
```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.11.0 \
--set installCRDs=true
```
## Install Kamaji Controller
Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) through validating and mutating webhook configurations. These webhooks are secured via TLS, and their certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
The Kamaji controller needs to access a default datastore in order to save the tenants' cluster data. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` out of the box.
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save the tenants' cluster data. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` as datastore, out of the box.
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
@@ -109,7 +124,8 @@ helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a different storage system, such as `MySQL`- or `PostgreSQL`-compatible databases, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
!!! note "A managed datastore is highly recommended in production"
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to set up a multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, such as `MySQL`- or `PostgreSQL`-compatible databases, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Create Tenant Cluster
@@ -257,7 +273,11 @@ kubernetes 10.240.0.100:6443 57m
```
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with the Cluster API and other tools, for example, Terraform.
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`.
!!! note "Cluster API support"
In the future, we'll support creating tenant clusters through the Cluster API.
Create an Azure Virtual Machine Scale Set to host worker nodes
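As an illustrative sketch only (the resource group, scale set name, image alias, and size are assumptions, not the guide's actual parameters):
```bash
# Hypothetical example: a VM Scale Set for tenant worker nodes.
az vmss create \
  --resource-group myKamajiRG \
  --name tenant-00-workers \
  --image Ubuntu2204 \
  --instance-count 2 \
  --vm-sku Standard_D2s_v3
```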

View File

@@ -1,5 +0,0 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [MySQL](https://www.mysql.com/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.

View File

@@ -1,6 +0,0 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
One of the implementations is [PostgreSQL](https://www.postgresql.org/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.

Binary file not shown (added, 152 KiB)

Binary file not shown (added, 31 KiB)

Binary file not shown (removed, 189 KiB)

Binary file not shown (removed, 184 KiB)

Binary file not shown (added, 14 KiB)

View File

@@ -1,51 +1,20 @@
# Kamaji
**Kamaji** deploys and operates Kubernetes at scale with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Plane components run in a single pod instead of on dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
![Architecture](images/kamaji-light.png#gh-light-mode-only)
![Architecture](images/kamaji-dark.png#gh-dark-mode-only)
<img src="images/architecture.png" width="600">
All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
<p align="center" style="padding: 6px 6px">
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="200" />
</p>
## Features
- **Self Service Kubernetes:** give users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** let tenants access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full APIs compliant:** tenant clusters are fully CNCF-compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji-provisioned cluster and a dedicated cluster.
!!! info "CNCF Compliance"
All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
## Getting started
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji on [KinD](https://kind.sigs.k8s.io/).
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji.
## Open Source
Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged in the same way they would run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own servers. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants, keeping tenant namespaces isolated by policies. While this solution is often the right choice, balancing features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can give the tenant cluster admin permissions.
Q. Well, you convinced me. How can I give it a try?
A. It is possible to get started with Kamaji on a laptop with [KinD](getting-started.md) installed.

File diff suppressed because it is too large

View File

@@ -1,53 +1,66 @@
site_name: Kamaji
repo_name: clastix/kamaji
repo_url: https://github.com/clastix/kamaji
site_name: Kamaji
site_url: https://kamaji.clastix.io/
docs_dir: content
site_dir: site
site_author: bsctl
site_description: >-
Kamaji deploys and operates Kubernetes Control Plane at scale with a fraction of the operational burden.
copyright: Copyright &copy; 2020 - 2023 Clastix Labs
theme:
name: material
features:
- navigation.tabs
- navigation.tabs.sticky
- navigation.indexes
- navigation.instant
- navigation.sections
- navigation.path
- navigation.footer
- content.code.copy
include_sidebar: true
palette:
# Palette toggle for automatic mode
- media: "(prefers-color-scheme)"
primary: white
toggle:
icon: material/brightness-auto
name: Switch to light mode
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
scheme: default
primary: white
toggle:
icon: material/lightbulb
name: Switch to dark mode
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
primary: white
toggle:
icon: material/lightbulb-outline
name: Switch to system preference
favicon: images/favicon.png
logo: images/logo.png
markdown_extensions:
- admonition
- attr_list
- def_list
- md_in_html
# Generate navigation bar
nav:
- 'Kamaji': index.md
- 'Getting started':
- getting-started/install.md
- getting-started/getting-started.md
- 'Getting started': getting-started.md
- 'Concepts': concepts.md
- 'Guides':
- guides/index.md
- guides/kamaji-deployment-guide.md
- guides/kamaji-azure-deployment-guide.md
- guides/postgresql-datastore.md
- guides/mysql-datastore.md
- guides/kamaji-azure-deployment.md
- guides/alternative-datastore.md
- guides/kamaji-gitops-flux.md
- guides/upgrade.md
- guides/datastore-migration.md
@@ -61,4 +74,3 @@ nav:
- reference/versioning.md
- reference/api.md
- 'Contribute': contribute.md

View File

@@ -0,0 +1,179 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
var _ = Describe("Deploy a TenantControlPlane resource with additional resources", func() {
// TenantControlPlane object with additional resources
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "validated-additional-resources",
Namespace: "default",
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
AdditionalInitContainers: []corev1.Container{{
Name: initContainerName,
Image: initContainerImage,
Command: []string{
"/bin/sh",
"-c",
"echo hello world",
},
}},
AdditionalContainers: []corev1.Container{{
Name: additionalContainerName,
Image: additionalContainerImage,
}},
AdditionalVolumes: []corev1.Volume{
{
Name: apiServerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "api-server",
},
},
},
},
{
Name: controllerManagerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "controller-manager",
},
},
},
},
{
Name: schedulerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "scheduler",
},
},
},
},
},
AdditionalVolumeMounts: &kamajiv1alpha1.AdditionalVolumeMounts{
APIServer: []corev1.VolumeMount{
{
Name: apiServerVolumeName,
MountPath: "/etc/api-server",
},
},
ControllerManager: []corev1.VolumeMount{
{
Name: controllerManagerVolumeName,
MountPath: "/etc/controller-manager",
},
},
Scheduler: []corev1.VolumeMount{
{
Name: schedulerVolumeName,
MountPath: "/etc/scheduler",
},
},
},
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},
},
Kubernetes: kamajiv1alpha1.KubernetesSpec{
Version: "v1.23.6",
},
},
}
apiServerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "api-server",
Namespace: tcp.Namespace,
},
Data: map[string]string{
"api-server": "true",
},
}
controllerManagerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "controller-manager",
Namespace: tcp.Namespace,
},
Data: map[string]string{
"controller-manager": "true",
},
}
schedulerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "scheduler",
Namespace: tcp.Namespace,
},
Data: map[string]string{
"scheduler": "true",
},
}
// Create a TenantControlPlane resource into the cluster
JustBeforeEach(func() {
Expect(k8sClient.Create(context.Background(), apiServerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), controllerManagerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), schedulerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
})
// Delete the TenantControlPlane resource after test is finished
JustAfterEach(func() {
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
Expect(k8sClient.Delete(context.Background(), apiServerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Delete(context.Background(), controllerManagerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Delete(context.Background(), schedulerConfigMap)).NotTo(HaveOccurred())
})
It("should block wrong Deployment configuration", func() {
// Should be ready
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
By("duplicating mount path", func() {
Consistently(func() error {
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, tcp)).NotTo(HaveOccurred())
lastVolumeMountIndex := len(tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts.APIServer) - 1
additionalVolumeMount := tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts.APIServer[lastVolumeMountIndex]
additionalVolumeMount.Name = "duplicated"
tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts.APIServer = append(tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts.APIServer, additionalVolumeMount)
return k8sClient.Update(context.Background(), tcp)
}, 10*time.Second, time.Second).ShouldNot(Succeed())
})
By("duplicating container", func() {
Consistently(func() error {
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, tcp)).NotTo(HaveOccurred())
tcp.Spec.ControlPlane.Deployment.AdditionalContainers = append(tcp.Spec.ControlPlane.Deployment.AdditionalContainers, corev1.Container{
Name: "kube-apiserver",
Image: "mocked",
})
return k8sClient.Update(context.Background(), tcp)
}, 10*time.Second, time.Second).ShouldNot(Succeed())
})
})
})

View File

@@ -0,0 +1,323 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
)
const (
namespace = "default"
tcpName = "tcp-additional"
initContainerName = "init"
initContainerImage = "registry.k8s.io/e2e-test-images/busybox:1.29-4"
additionalContainerName = "nginx"
additionalContainerImage = "registry.k8s.io/e2e-test-images/nginx:1.15-4"
apiServerVolumeName = "api-server-volume"
controllerManagerVolumeName = "controller-manager-volume"
schedulerVolumeName = "scheduler-volume"
)
var _ = Describe("Deploy a TenantControlPlane resource with additional options", func() {
// TenantControlPlane object with additional resources
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: tcpName,
Namespace: namespace,
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
AdditionalInitContainers: []corev1.Container{{
Name: initContainerName,
Image: initContainerImage,
Command: []string{
"/bin/sh",
"-c",
"echo hello world",
},
}},
AdditionalContainers: []corev1.Container{{
Name: additionalContainerName,
Image: additionalContainerImage,
}},
AdditionalVolumes: []corev1.Volume{
{
Name: apiServerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "api-server",
},
},
},
},
{
Name: controllerManagerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "controller-manager",
},
},
},
},
{
Name: schedulerVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "scheduler",
},
},
},
},
},
AdditionalVolumeMounts: &kamajiv1alpha1.AdditionalVolumeMounts{
APIServer: []corev1.VolumeMount{
{
Name: apiServerVolumeName,
MountPath: "/etc/api-server",
},
},
ControllerManager: []corev1.VolumeMount{
{
Name: controllerManagerVolumeName,
MountPath: "/etc/controller-manager",
},
},
Scheduler: []corev1.VolumeMount{
{
Name: schedulerVolumeName,
MountPath: "/etc/scheduler",
},
},
},
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},
},
NetworkProfile: kamajiv1alpha1.NetworkProfileSpec{
Address: "172.18.0.2",
},
Kubernetes: kamajiv1alpha1.KubernetesSpec{
Version: "v1.23.6",
Kubelet: kamajiv1alpha1.KubeletSpec{
CGroupFS: "cgroupfs",
},
AdmissionControllers: kamajiv1alpha1.AdmissionControllers{
"LimitRanger",
"ResourceQuota",
},
},
Addons: kamajiv1alpha1.AddonsSpec{},
},
}
apiServerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "api-server",
Namespace: namespace,
},
Data: map[string]string{
"api-server": "true",
},
}
controllerManagerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "controller-manager",
Namespace: namespace,
},
Data: map[string]string{
"controller-manager": "true",
},
}
schedulerConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "scheduler",
Namespace: namespace,
},
Data: map[string]string{
"scheduler": "true",
},
}
// Create a TenantControlPlane resource into the cluster
JustBeforeEach(func() {
Expect(k8sClient.Create(context.Background(), apiServerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), controllerManagerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), schedulerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
})
// Delete the TenantControlPlane resource after test is finished
JustAfterEach(func() {
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
Expect(k8sClient.Delete(context.Background(), apiServerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Delete(context.Background(), controllerManagerConfigMap)).NotTo(HaveOccurred())
Expect(k8sClient.Delete(context.Background(), schedulerConfigMap)).NotTo(HaveOccurred())
})
It("should have the additional resources", func() {
// Should be ready
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
// Should have a TCP deployment
deploy := appsv1.Deployment{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{
Name: tcpName,
Namespace: namespace,
}, &deploy)).NotTo(HaveOccurred())
By("checking additional init containers", func() {
found, _ := utilities.HasNamedContainer(deploy.Spec.Template.Spec.InitContainers, initContainerName)
Expect(found).To(BeTrue(), "Should have the configured AdditionalInitContainers")
})
By("checking additional containers", func() {
found, _ := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, additionalContainerName)
Expect(found).To(BeTrue(), "Should have the configured AdditionalContainers")
})
By("checking kube-apiserver volumes", func() {
found, _ := utilities.HasNamedVolume(deploy.Spec.Template.Spec.Volumes, apiServerVolumeName)
Expect(found).To(BeTrue())
found, containerIndex := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, "kube-apiserver")
Expect(found).To(BeTrue())
found, _ = utilities.HasNamedVolumeMount(deploy.Spec.Template.Spec.Containers[containerIndex].VolumeMounts, apiServerVolumeName)
Expect(found).To(BeTrue())
})
By("checking kube-scheduler volumes", func() {
found, _ := utilities.HasNamedVolume(deploy.Spec.Template.Spec.Volumes, schedulerVolumeName)
Expect(found).To(BeTrue())
found, containerIndex := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, "kube-scheduler")
Expect(found).To(BeTrue())
found, _ = utilities.HasNamedVolumeMount(deploy.Spec.Template.Spec.Containers[containerIndex].VolumeMounts, schedulerVolumeName)
Expect(found).To(BeTrue())
})
By("checking kube-controller-manager volumes", func() {
found, _ := utilities.HasNamedVolume(deploy.Spec.Template.Spec.Volumes, controllerManagerVolumeName)
Expect(found).To(BeTrue())
found, containerIndex := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, "kube-controller-manager")
Expect(found).To(BeTrue())
found, _ = utilities.HasNamedVolumeMount(deploy.Spec.Template.Spec.Containers[containerIndex].VolumeMounts, controllerManagerVolumeName)
Expect(found).To(BeTrue())
})
By("removing the additional resources", func() {
var containerName string
volumeNames, volumeMounts := sets.New[string](), tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts
Eventually(func() error {
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, tcp)).NotTo(HaveOccurred())
containerName = tcp.Spec.ControlPlane.Deployment.AdditionalContainers[0].Name
for _, volume := range tcp.Spec.ControlPlane.Deployment.AdditionalVolumes {
volumeNames.Insert(volume.Name)
}
tcp.Spec.ControlPlane.Deployment.AdditionalInitContainers = nil
tcp.Spec.ControlPlane.Deployment.AdditionalContainers = nil
tcp.Spec.ControlPlane.Deployment.AdditionalVolumeMounts = nil
tcp.Spec.ControlPlane.Deployment.AdditionalVolumes = nil
return k8sClient.Update(context.Background(), tcp)
}, 10*time.Second, time.Second).ShouldNot(HaveOccurred())
Eventually(func() []corev1.Container {
deploy := appsv1.Deployment{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{
Name: tcpName,
Namespace: namespace,
}, &deploy)).NotTo(HaveOccurred())
return deploy.Spec.Template.Spec.InitContainers
}, 10*time.Second, time.Second).Should(HaveLen(0), "Deployment should not contain anymore the init container")
Eventually(func() bool {
deploy := appsv1.Deployment{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{
Name: tcpName,
Namespace: namespace,
}, &deploy)).NotTo(HaveOccurred())
found, _ := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, containerName)
return found
}, 10*time.Second, time.Second).Should(BeFalse(), "Deployment should not contain anymore the additional container")
Eventually(func() error {
deploy := appsv1.Deployment{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{
Name: tcpName,
Namespace: namespace,
}, &deploy)).NotTo(HaveOccurred())
for _, volume := range deploy.Spec.Template.Spec.Volumes {
if volumeNames.Has(volume.Name) {
return fmt.Errorf("extra volume with name %s is still present", volume.Name)
}
}
type testCase struct {
containerName string
volumeMounts []corev1.VolumeMount
}
for _, tc := range []testCase{
{
containerName: "kube-scheduler",
volumeMounts: volumeMounts.Scheduler,
},
{
containerName: "kube-apiserver",
volumeMounts: volumeMounts.APIServer,
},
{
containerName: "kube-scheduler",
volumeMounts: volumeMounts.Scheduler,
},
} {
for _, volumeMount := range tc.volumeMounts {
found, containerIndex := utilities.HasNamedContainer(deploy.Spec.Template.Spec.Containers, tc.containerName)
if !found {
return fmt.Errorf("expected %s, container not found", tc.containerName)
}
found, _ = utilities.HasNamedVolumeMount(deploy.Spec.Template.Spec.Containers[containerIndex].VolumeMounts, volumeMount.Name)
if found {
return fmt.Errorf("extra volume mount with name %s is still present", volumeMount.Name)
}
}
}
return nil
}, 10*time.Second, time.Second).Should(BeNil(), "Deployment should not contain anymore the extra volumes")
})
})
})

go.mod
View File

@@ -8,6 +8,7 @@ require (
github.com/go-logr/logr v1.2.3
github.com/go-pg/pg/v10 v10.10.6
github.com/go-sql-driver/mysql v1.6.0
github.com/google/go-cmp v0.5.9
github.com/google/uuid v1.3.0
github.com/json-iterator/go v1.1.12
github.com/juju/mutex/v2 v2.0.0
@@ -21,6 +22,7 @@ require (
go.etcd.io/etcd/api/v3 v3.5.6
go.etcd.io/etcd/client/v3 v3.5.6
go.uber.org/automaxprocs v1.5.1
gomodules.xyz/jsonpatch/v2 v2.2.0
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/apiserver v0.26.1
@@ -75,7 +77,6 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
@@ -136,7 +137,6 @@ require (
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.63.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect

File diff suppressed because it is too large

View File

@@ -0,0 +1,263 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package controlplane
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
)
const (
AgentName = "konnectivity-agent"
CertCommonName = "system:konnectivity-server"
konnectivityEgressSelectorConfigurationPath = "/etc/kubernetes/konnectivity/configurations/egress-selector-configuration.yaml"
konnectivityServerName = "konnectivity-server"
konnectivityServerPath = "/run/konnectivity"
egressSelectorConfigurationVolume = "egress-selector-configuration"
konnectivityUDSVolume = "konnectivity-uds"
konnectivityServerKubeconfigVolume = "konnectivity-server-kubeconfig"
)
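// Konnectivity is the builder in charge of decorating the Tenant Control Plane
// Deployment with the konnectivity-server sidecar: it adds the server container,
// wires the egress selector configuration into kube-apiserver, and mounts the
// required volumes.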
type Konnectivity struct {
Scheme runtime.Scheme
}
func (k Konnectivity) buildKonnectivityContainer(addon *kamajiv1alpha1.KonnectivitySpec, replicas int32, podSpec *corev1.PodSpec) {
found, index := utilities.HasNamedContainer(podSpec.Containers, konnectivityServerName)
if !found {
index = len(podSpec.Containers)
podSpec.Containers = append(podSpec.Containers, corev1.Container{})
}
podSpec.Containers[index].Name = konnectivityServerName
podSpec.Containers[index].Image = fmt.Sprintf("%s:%s", addon.KonnectivityServerSpec.Image, addon.KonnectivityServerSpec.Version)
podSpec.Containers[index].Command = []string{"/proxy-server"}
args := utilities.ArgsFromSliceToMap(addon.KonnectivityServerSpec.ExtraArgs)
args["--uds-name"] = fmt.Sprintf("%s/konnectivity-server.socket", konnectivityServerPath)
args["--cluster-cert"] = "/etc/kubernetes/pki/apiserver.crt"
args["--cluster-key"] = "/etc/kubernetes/pki/apiserver.key"
args["--mode"] = "grpc"
args["--server-port"] = "0"
args["--agent-port"] = fmt.Sprintf("%d", addon.KonnectivityServerSpec.Port)
args["--admin-port"] = "8133"
args["--health-port"] = "8134"
args["--agent-namespace"] = "kube-system"
args["--agent-service-account"] = AgentName
args["--kubeconfig"] = "/etc/kubernetes/konnectivity-server.conf"
args["--authentication-audience"] = CertCommonName
args["--server-count"] = fmt.Sprintf("%d", replicas)
podSpec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[index].LivenessProbe = &corev1.Probe{
InitialDelaySeconds: 30,
TimeoutSeconds: 60,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 3,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8134),
Scheme: corev1.URISchemeHTTP,
},
},
}
podSpec.Containers[index].Ports = []corev1.ContainerPort{
{
Name: "agentport",
ContainerPort: addon.KonnectivityServerSpec.Port,
Protocol: corev1.ProtocolTCP,
},
{
Name: "adminport",
ContainerPort: 8133,
Protocol: corev1.ProtocolTCP,
},
{
Name: "healthport",
ContainerPort: 8134,
Protocol: corev1.ProtocolTCP,
},
}
podSpec.Containers[index].VolumeMounts = []corev1.VolumeMount{
{
Name: "etc-kubernetes-pki",
MountPath: "/etc/kubernetes/pki",
ReadOnly: true,
},
{
Name: "konnectivity-server-kubeconfig",
MountPath: "/etc/kubernetes/konnectivity-server.conf",
SubPath: "konnectivity-server.conf",
ReadOnly: true,
},
{
Name: konnectivityUDSVolume,
MountPath: konnectivityServerPath,
ReadOnly: false,
},
}
podSpec.Containers[index].ImagePullPolicy = corev1.PullAlways
podSpec.Containers[index].Resources = corev1.ResourceRequirements{
Limits: nil,
Requests: nil,
}
if resources := addon.KonnectivityServerSpec.Resources; resources != nil {
podSpec.Containers[index].Resources.Limits = resources.Limits
podSpec.Containers[index].Resources.Requests = resources.Requests
}
}
func (k Konnectivity) RemovingVolumeMounts(podSpec *corev1.PodSpec) {
found, index := utilities.HasNamedContainer(podSpec.Containers, apiServerContainerName)
if !found {
return
}
for _, volumeMountName := range []string{konnectivityUDSVolume, egressSelectorConfigurationVolume, konnectivityServerKubeconfigVolume} {
if ok, i := utilities.HasNamedVolumeMount(podSpec.Containers[index].VolumeMounts, volumeMountName); ok {
var volumesMounts []corev1.VolumeMount
volumesMounts = append(volumesMounts, podSpec.Containers[index].VolumeMounts[:i]...)
volumesMounts = append(volumesMounts, podSpec.Containers[index].VolumeMounts[i+1:]...)
podSpec.Containers[index].VolumeMounts = volumesMounts
}
}
}
func (k Konnectivity) RemovingVolumes(podSpec *corev1.PodSpec) {
for _, volumeName := range []string{konnectivityUDSVolume, egressSelectorConfigurationVolume} {
if volumeFound, volumeIndex := utilities.HasNamedVolume(podSpec.Volumes, volumeName); volumeFound {
var volumes []corev1.Volume
volumes = append(volumes, podSpec.Volumes[:volumeIndex]...)
volumes = append(volumes, podSpec.Volumes[volumeIndex+1:]...)
podSpec.Volumes = volumes
}
}
}
func (k Konnectivity) RemovingKubeAPIServerContainerArg(podSpec *corev1.PodSpec) {
if found, index := utilities.HasNamedContainer(podSpec.Containers, apiServerContainerName); found {
argsMap := utilities.ArgsFromSliceToMap(podSpec.Containers[index].Args)
if utilities.ArgsRemoveFlag(argsMap, "--egress-selector-config-file") {
podSpec.Containers[index].Args = utilities.ArgsFromMapToSlice(argsMap)
}
}
}
func (k Konnectivity) RemovingContainer(podSpec *corev1.PodSpec) {
if found, index := utilities.HasNamedContainer(podSpec.Containers, konnectivityServerName); found {
var containers []corev1.Container
containers = append(containers, podSpec.Containers[:index]...)
containers = append(containers, podSpec.Containers[index+1:]...)
podSpec.Containers = containers
}
}
func (k Konnectivity) buildVolumeMounts(podSpec *corev1.PodSpec) {
found, index := utilities.HasNamedContainer(podSpec.Containers, apiServerContainerName)
if !found {
return
}
// Adding the egress selector config file flag
args := utilities.ArgsFromSliceToMap(podSpec.Containers[index].Args)
utilities.ArgsAddFlagValue(args, "--egress-selector-config-file", konnectivityEgressSelectorConfigurationPath)
podSpec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
vFound, vIndex := false, 0
// Patching the volume mounts
if vFound, vIndex = utilities.HasNamedVolumeMount(podSpec.Containers[index].VolumeMounts, konnectivityUDSVolume); !vFound {
vIndex = len(podSpec.Containers[index].VolumeMounts)
podSpec.Containers[index].VolumeMounts = append(podSpec.Containers[index].VolumeMounts, corev1.VolumeMount{})
}
podSpec.Containers[index].VolumeMounts[vIndex].Name = konnectivityUDSVolume
podSpec.Containers[index].VolumeMounts[vIndex].ReadOnly = false
podSpec.Containers[index].VolumeMounts[vIndex].MountPath = konnectivityServerPath
if vFound, vIndex = utilities.HasNamedVolumeMount(podSpec.Containers[index].VolumeMounts, egressSelectorConfigurationVolume); !vFound {
vIndex = len(podSpec.Containers[index].VolumeMounts)
podSpec.Containers[index].VolumeMounts = append(podSpec.Containers[index].VolumeMounts, corev1.VolumeMount{})
}
podSpec.Containers[index].VolumeMounts[vIndex].Name = egressSelectorConfigurationVolume
podSpec.Containers[index].VolumeMounts[vIndex].ReadOnly = false
podSpec.Containers[index].VolumeMounts[vIndex].MountPath = "/etc/kubernetes/konnectivity/configurations"
}
func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, podSpec *corev1.PodSpec) {
found, index := false, 0
// Defining volumes for the UDS socket
found, index = utilities.HasNamedVolume(podSpec.Volumes, konnectivityUDSVolume)
if !found {
index = len(podSpec.Volumes)
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
}
podSpec.Volumes[index].Name = konnectivityUDSVolume
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: "Memory",
},
}
// Defining volumes for the egress selector configuration
found, index = utilities.HasNamedVolume(podSpec.Volumes, egressSelectorConfigurationVolume)
if !found {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
index = len(podSpec.Volumes) - 1
}
podSpec.Volumes[index].Name = egressSelectorConfigurationVolume
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: status.ConfigMap.Name,
},
DefaultMode: pointer.Int32(420),
},
}
// Defining volume for the Konnectivity kubeconfig
found, index = utilities.HasNamedVolume(podSpec.Volumes, konnectivityServerKubeconfigVolume)
if !found {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
index = len(podSpec.Volumes) - 1
}
podSpec.Volumes[index].Name = konnectivityServerKubeconfigVolume
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: status.Kubeconfig.SecretName,
DefaultMode: pointer.Int32(420),
},
}
}
func (k Konnectivity) Build(deployment *appsv1.Deployment, tenantControlPlane kamajiv1alpha1.TenantControlPlane) {
k.buildKonnectivityContainer(tenantControlPlane.Spec.Addons.Konnectivity, tenantControlPlane.Spec.ControlPlane.Deployment.Replicas, &deployment.Spec.Template.Spec)
k.buildVolumeMounts(&deployment.Spec.Template.Spec)
k.buildVolumes(tenantControlPlane.Status.Addons.Konnectivity, &deployment.Spec.Template.Spec)
k.Scheme.Default(deployment)
}
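
All of the builder methods above rely on the same idempotent find-or-append pattern: look up an element by name, append a zero value only when it is missing, then unconditionally overwrite the fields at the resulting index, so repeated reconciliations converge instead of duplicating entries. A minimal standalone sketch of the pattern follows; the hasNamedVolume helper is a local stand-in for the utilities.HasNamedVolume signature implied by the calls above.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// hasNamedVolume is a stand-in for utilities.HasNamedVolume: it reports
// whether a volume with the given name exists, and at which index.
func hasNamedVolume(volumes []corev1.Volume, name string) (bool, int) {
    for i, v := range volumes {
        if v.Name == name {
            return true, i
        }
    }
    return false, 0
}

// ensureVolume applies the find-or-append pattern: after the lookup the
// index always points at a valid element, so overwriting its fields is
// both safe and idempotent.
func ensureVolume(podSpec *corev1.PodSpec, name string, source corev1.VolumeSource) {
    found, index := hasNamedVolume(podSpec.Volumes, name)
    if !found {
        index = len(podSpec.Volumes)
        podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
    }
    podSpec.Volumes[index].Name = name
    podSpec.Volumes[index].VolumeSource = source
}

func main() {
    spec := &corev1.PodSpec{}
    // Calling twice must not duplicate the volume.
    for i := 0; i < 2; i++ {
        ensureVolume(spec, "konnectivity-uds", corev1.VolumeSource{
            EmptyDir: &corev1.EmptyDirVolumeSource{Medium: "Memory"},
        })
    }
    fmt.Println(len(spec.Volumes)) // prints 1
}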

View File

@@ -5,18 +5,11 @@ package resources
import (
"context"
"crypto/md5"
"fmt"
"sort"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
builder "github.com/clastix/kamaji/internal/builders/controlplane"
@@ -62,34 +55,11 @@ func (r *KubernetesDeploymentResource) Define(_ context.Context, tenantControlPl
func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
- logger := log.FromContext(ctx, "resource", r.GetName())
- address, _, err := tenantControlPlane.AssignedControlPlaneAddress()
- if err != nil {
- logger.Error(err, "cannot retrieve Tenant Control Plane address")
- return err
- }
- d := builder.Deployment{
- Address: address,
+ (builder.Deployment{
Client: r.Client,
DataStore: r.DataStore,
KineContainerImage: r.KineContainerImage,
- }
- d.SetLabels(r.resource, utilities.MergeMaps(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
- d.SetAnnotations(r.resource, utilities.MergeMaps(r.resource.Annotations, tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Annotations))
- d.SetTemplateLabels(&r.resource.Spec.Template, r.deploymentTemplateLabels(ctx, tenantControlPlane))
- d.SetNodeSelector(&r.resource.Spec.Template.Spec, tenantControlPlane)
- d.SetToleration(&r.resource.Spec.Template.Spec, tenantControlPlane)
- d.SetAffinity(&r.resource.Spec.Template.Spec, tenantControlPlane)
- d.SetStrategy(&r.resource.Spec, tenantControlPlane)
- d.SetSelector(&r.resource.Spec, tenantControlPlane)
- d.SetTopologySpreadConstraints(&r.resource.Spec, tenantControlPlane.Spec.ControlPlane.Deployment.TopologySpreadConstraints)
- d.SetRuntimeClass(&r.resource.Spec.Template.Spec, tenantControlPlane)
- d.SetReplicas(&r.resource.Spec, tenantControlPlane)
- d.ResetKubeAPIServerFlags(r.resource, tenantControlPlane)
- d.SetContainers(&r.resource.Spec.Template.Spec, tenantControlPlane, address)
- d.SetVolumes(&r.resource.Spec.Template.Spec, tenantControlPlane)
+ }).Build(ctx, r.resource, *tenantControlPlane)
return controllerutil.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
@@ -127,30 +97,6 @@ func (r *KubernetesDeploymentResource) UpdateTenantControlPlaneStatus(_ context.
return nil
}
func (r *KubernetesDeploymentResource) deploymentTemplateLabels(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (labels map[string]string) {
hash := func(ctx context.Context, namespace, secretName string) string {
h, _ := r.SecretHashValue(ctx, r.Client, namespace, secretName)
return h
}
labels = map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
"component.kamaji.clastix.io/api-server-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.APIServer.SecretName),
"component.kamaji.clastix.io/api-server-kubelet-client-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName),
"component.kamaji.clastix.io/ca": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.CA.SecretName),
"component.kamaji.clastix.io/controller-manager-kubeconfig": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.KubeConfig.ControllerManager.SecretName),
"component.kamaji.clastix.io/front-proxy-ca-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName),
"component.kamaji.clastix.io/front-proxy-client-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.FrontProxyClient.SecretName),
"component.kamaji.clastix.io/service-account": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.SA.SecretName),
"component.kamaji.clastix.io/scheduler-kubeconfig": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.KubeConfig.Scheduler.SecretName),
"component.kamaji.clastix.io/datastore": tenantControlPlane.Spec.DataStore,
}
return labels
}
func (r *KubernetesDeploymentResource) isProgressingUpgrade() bool {
if r.resource.ObjectMeta.GetGeneration() != r.resource.Status.ObservedGeneration {
return true
@@ -176,33 +122,3 @@ func (r *KubernetesDeploymentResource) isProvisioning(tenantControlPlane *kamaji
func (r *KubernetesDeploymentResource) isNotReady() bool {
return r.resource.Status.ReadyReplicas == 0
}
// SecretHashValue function returns the md5 value for the secret of the given name and namespace.
func (r *KubernetesDeploymentResource) SecretHashValue(ctx context.Context, client client.Client, namespace, name string) (string, error) {
secret := &corev1.Secret{}
if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, secret); err != nil {
return "", errors.Wrap(err, "cannot retrieve *corev1.Secret for resource version retrieval")
}
return r.HashValue(*secret), nil
}
// HashValue function returns the md5 value for the given secret.
func (r *KubernetesDeploymentResource) HashValue(secret corev1.Secret) string {
// Go iterates over map keys in a random order, so we must sort them for a stable digest.
keys := make([]string, 0, len(secret.Data))
for k := range secret.Data {
keys = append(keys, k)
}
sort.Strings(keys)
// Generating MD5 of Secret values, sorted by key
h := md5.New()
for _, key := range keys {
h.Write(secret.Data[key])
}
return fmt.Sprintf("%x", h.Sum(nil))
}
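
SecretHashValue feeds the Deployment's template labels, so the digest has to be stable across reconciliations: hashing secret.Data in map iteration order would change the label, and hence roll the pods, on every pass. A self-contained sketch of the same sort-then-hash idea:

package main

import (
    "crypto/md5"
    "fmt"
    "sort"
)

// hashSecretData reproduces the sort-then-hash approach: Go map iteration
// order is randomized, so keys are sorted before feeding the digest.
func hashSecretData(data map[string][]byte) string {
    keys := make([]string, 0, len(data))
    for k := range data {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    h := md5.New()
    for _, key := range keys {
        h.Write(data[key])
    }
    return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
    data := map[string][]byte{
        "tls.crt": []byte("certificate"),
        "tls.key": []byte("key"),
    }
    // The digest is identical on every call, regardless of map order.
    fmt.Println(hashSecretData(data))
}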

View File

@@ -7,33 +7,22 @@ import (
"context"
"fmt"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
builder "github.com/clastix/kamaji/internal/builders/controlplane"
"github.com/clastix/kamaji/internal/utilities"
)
const (
konnectivityEgressSelectorConfigurationPath = "/etc/kubernetes/konnectivity/configurations/egress-selector-configuration.yaml"
konnectivityServerName = "konnectivity-server"
konnectivityServerPath = "/run/konnectivity"
egressSelectorConfigurationVolume = "egress-selector-configuration"
konnectivityUDSVolume = "konnectivity-uds"
konnectivityServerKubeconfigVolume = "konnectivity-server-kubeconfig"
)
type KubernetesDeploymentResource struct {
resource *appsv1.Deployment
- Client client.Client
+ Builder builder.Konnectivity
+ Client client.Client
}
func (r *KubernetesDeploymentResource) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -57,52 +46,17 @@ func (r *KubernetesDeploymentResource) CleanUp(ctx context.Context, _ *kamajiv1a
logger.Info("performing clean-up from Deployment of Konnectivity")
res, err := utilities.CreateOrUpdateWithConflict(ctx, r.Client, r.resource, func() error {
- if found, index := utilities.HasNamedContainer(r.resource.Spec.Template.Spec.Containers, konnectivityServerName); found {
- logger.Info("removing Konnectivity container")
- var containers []corev1.Container
- containers = append(containers, r.resource.Spec.Template.Spec.Containers[:index]...)
- containers = append(containers, r.resource.Spec.Template.Spec.Containers[index+1:]...)
- r.resource.Spec.Template.Spec.Containers = containers
- }
- if found, index := utilities.HasNamedContainer(r.resource.Spec.Template.Spec.Containers, "kube-apiserver"); found {
- argsMap := utilities.ArgsFromSliceToMap(r.resource.Spec.Template.Spec.Containers[index].Args)
- if utilities.ArgsRemoveFlag(argsMap, "--egress-selector-config-file") {
- logger.Info("removing egress selector configuration file from kube-apiserver container")
- r.resource.Spec.Template.Spec.Containers[index].Args = utilities.ArgsFromMapToSlice(argsMap)
- }
- for _, volumeName := range []string{konnectivityUDSVolume, egressSelectorConfigurationVolume} {
- if volumeFound, volumeIndex := utilities.HasNamedVolume(r.resource.Spec.Template.Spec.Volumes, volumeName); volumeFound {
- logger.Info("removing Konnectivity volume " + volumeName)
- var volumes []corev1.Volume
- volumes = append(volumes, r.resource.Spec.Template.Spec.Volumes[:volumeIndex]...)
- volumes = append(volumes, r.resource.Spec.Template.Spec.Volumes[volumeIndex+1:]...)
- r.resource.Spec.Template.Spec.Volumes = volumes
- }
- }
- for _, volumeMountName := range []string{konnectivityUDSVolume, egressSelectorConfigurationVolume, konnectivityServerKubeconfigVolume} {
- if ok, i := utilities.HasNamedVolumeMount(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts, volumeMountName); ok {
- logger.Info("removing Konnectivity volume mount " + volumeMountName)
- var volumesMounts []corev1.VolumeMount
- volumesMounts = append(volumesMounts, r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[:i]...)
- volumesMounts = append(volumesMounts, r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[i+1:]...)
- r.resource.Spec.Template.Spec.Containers[index].VolumeMounts = volumesMounts
- }
- }
- }
+ logger.Info("removing Konnectivity container")
+ r.Builder.RemovingContainer(&r.resource.Spec.Template.Spec)
+ logger.Info("removing egress selector configuration file from kube-apiserver container")
+ r.Builder.RemovingKubeAPIServerContainerArg(&r.resource.Spec.Template.Spec)
+ logger.Info("removing Konnectivity volumes")
+ r.Builder.RemovingVolumes(&r.resource.Spec.Template.Spec)
+ logger.Info("removing Konnectivity volume mounts")
+ r.Builder.RemovingVolumeMounts(&r.resource.Spec.Template.Spec)
return nil
})
@@ -121,95 +75,6 @@ func (r *KubernetesDeploymentResource) Define(_ context.Context, tenantControlPl
return nil
}
func (r *KubernetesDeploymentResource) syncContainer(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) {
found, index := utilities.HasNamedContainer(r.resource.Spec.Template.Spec.Containers, konnectivityServerName)
if !found {
r.resource.Spec.Template.Spec.Containers = append(r.resource.Spec.Template.Spec.Containers, corev1.Container{})
index = len(r.resource.Spec.Template.Spec.Containers) - 1
}
r.resource.Spec.Template.Spec.Containers[index].Name = konnectivityServerName
r.resource.Spec.Template.Spec.Containers[index].Image = fmt.Sprintf("%s:%s", tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Image, tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Version)
r.resource.Spec.Template.Spec.Containers[index].Command = []string{"/proxy-server"}
args := utilities.ArgsFromSliceToMap(tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.ExtraArgs)
args["--uds-name"] = fmt.Sprintf("%s/konnectivity-server.socket", konnectivityServerPath)
args["--cluster-cert"] = "/etc/kubernetes/pki/apiserver.crt"
args["--cluster-key"] = "/etc/kubernetes/pki/apiserver.key"
args["--mode"] = "grpc"
args["--server-port"] = "0"
args["--agent-port"] = fmt.Sprintf("%d", tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Port)
args["--admin-port"] = "8133"
args["--health-port"] = "8134"
args["--agent-namespace"] = "kube-system"
args["--agent-service-account"] = AgentName
args["--kubeconfig"] = "/etc/kubernetes/konnectivity-server.conf"
args["--authentication-audience"] = CertCommonName
args["--server-count"] = fmt.Sprintf("%d", tenantControlPlane.Spec.ControlPlane.Deployment.Replicas)
r.resource.Spec.Template.Spec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
r.resource.Spec.Template.Spec.Containers[index].LivenessProbe = &corev1.Probe{
InitialDelaySeconds: 30,
TimeoutSeconds: 60,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 3,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8134),
Scheme: corev1.URISchemeHTTP,
},
},
}
r.resource.Spec.Template.Spec.Containers[index].Ports = []corev1.ContainerPort{
{
Name: "agentport",
ContainerPort: tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Port,
Protocol: corev1.ProtocolTCP,
},
{
Name: "adminport",
ContainerPort: 8133,
Protocol: corev1.ProtocolTCP,
},
{
Name: "healthport",
ContainerPort: 8134,
Protocol: corev1.ProtocolTCP,
},
}
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts = []corev1.VolumeMount{
{
Name: "etc-kubernetes-pki",
MountPath: "/etc/kubernetes/pki",
ReadOnly: true,
},
{
Name: "konnectivity-server-kubeconfig",
MountPath: "/etc/kubernetes/konnectivity-server.conf",
SubPath: "konnectivity-server.conf",
ReadOnly: true,
},
{
Name: konnectivityUDSVolume,
MountPath: konnectivityServerPath,
ReadOnly: false,
},
}
r.resource.Spec.Template.Spec.Containers[index].ImagePullPolicy = corev1.PullAlways
r.resource.Spec.Template.Spec.Containers[index].Resources = corev1.ResourceRequirements{
Limits: nil,
Requests: nil,
}
if resources := tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityServerSpec.Resources; resources != nil {
r.resource.Spec.Template.Spec.Containers[index].Resources.Limits = resources.Limits
r.resource.Spec.Template.Spec.Containers[index].Resources.Requests = resources.Requests
}
}
func (r *KubernetesDeploymentResource) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() (err error) {
// If konnectivity is disabled, no operation is required:
@@ -222,13 +87,7 @@ func (r *KubernetesDeploymentResource) mutate(_ context.Context, tenantControlPl
return fmt.Errorf("the Deployment resource is not ready to be mangled for Konnectivity server enrichment")
}
- r.syncContainer(tenantControlPlane)
- if err = r.patchKubeAPIServerContainer(); err != nil {
- return errors.Wrap(err, "cannot sync patch kube-apiserver container")
- }
- r.syncVolumes(tenantControlPlane)
+ r.Builder.Build(r.resource, *tenantControlPlane)
return nil
}
@@ -247,88 +106,3 @@ func (r *KubernetesDeploymentResource) UpdateTenantControlPlaneStatus(_ context.
return nil
}
func (r *KubernetesDeploymentResource) patchKubeAPIServerContainer() error {
// Patching VolumesMounts
found, index := false, 0
found, index = utilities.HasNamedContainer(r.resource.Spec.Template.Spec.Containers, "kube-apiserver")
if !found {
return fmt.Errorf("missing kube-apiserver container, cannot patch arguments")
}
// Adding the egress selector config file flag
args := utilities.ArgsFromSliceToMap(r.resource.Spec.Template.Spec.Containers[index].Args)
utilities.ArgsAddFlagValue(args, "--egress-selector-config-file", konnectivityEgressSelectorConfigurationPath)
r.resource.Spec.Template.Spec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
vFound, vIndex := false, 0
// Patching the volume mounts
if vFound, vIndex = utilities.HasNamedVolumeMount(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts, konnectivityUDSVolume); !vFound {
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts = append(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts, corev1.VolumeMount{})
vIndex = len(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts) - 1
}
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].Name = konnectivityUDSVolume
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].ReadOnly = false
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].MountPath = konnectivityServerPath
if vFound, vIndex = utilities.HasNamedVolumeMount(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts, egressSelectorConfigurationVolume); !vFound {
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts = append(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts, corev1.VolumeMount{})
vIndex = len(r.resource.Spec.Template.Spec.Containers[index].VolumeMounts) - 1
}
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].Name = egressSelectorConfigurationVolume
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].ReadOnly = false
r.resource.Spec.Template.Spec.Containers[index].VolumeMounts[vIndex].MountPath = "/etc/kubernetes/konnectivity/configurations"
return nil
}
func (r *KubernetesDeploymentResource) syncVolumes(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) {
found, index := false, 0
// Defining volumes for the UDS socket
found, index = utilities.HasNamedVolume(r.resource.Spec.Template.Spec.Volumes, konnectivityUDSVolume)
if !found {
r.resource.Spec.Template.Spec.Volumes = append(r.resource.Spec.Template.Spec.Volumes, corev1.Volume{})
index = len(r.resource.Spec.Template.Spec.Volumes) - 1
}
r.resource.Spec.Template.Spec.Volumes[index].Name = konnectivityUDSVolume
r.resource.Spec.Template.Spec.Volumes[index].VolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: "Memory",
},
}
// Defining volumes for the egress selector configuration
found, index = utilities.HasNamedVolume(r.resource.Spec.Template.Spec.Volumes, egressSelectorConfigurationVolume)
if !found {
r.resource.Spec.Template.Spec.Volumes = append(r.resource.Spec.Template.Spec.Volumes, corev1.Volume{})
index = len(r.resource.Spec.Template.Spec.Volumes) - 1
}
r.resource.Spec.Template.Spec.Volumes[index].Name = egressSelectorConfigurationVolume
r.resource.Spec.Template.Spec.Volumes[index].VolumeSource = corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Name,
},
DefaultMode: pointer.Int32(420),
},
}
// Defining volume for the Konnectivity kubeconfig
found, index = utilities.HasNamedVolume(r.resource.Spec.Template.Spec.Volumes, konnectivityServerKubeconfigVolume)
if !found {
r.resource.Spec.Template.Spec.Volumes = append(r.resource.Spec.Template.Spec.Volumes, corev1.Volume{})
index = len(r.resource.Spec.Template.Spec.Volumes) - 1
}
r.resource.Spec.Template.Spec.Volumes[index].Name = konnectivityServerKubeconfigVolume
r.resource.Spec.Template.Spec.Volumes[index].VolumeSource = corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.SecretName,
DefaultMode: pointer.Int32(420),
},
}
}

View File

@@ -0,0 +1,91 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package webhook
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/clastix/kamaji/internal/webhook/handlers"
)
type handlersChainer struct {
decoder *admission.Decoder
}
//nolint:gocognit
func (h handlersChainer) Handler(object runtime.Object, routeHandlers ...handlers.Handler) admission.HandlerFunc {
return func(ctx context.Context, req admission.Request) admission.Response {
decodedObj, oldDecodedObj := object.DeepCopyObject(), object.DeepCopyObject()
if err := h.decoder.Decode(req, decodedObj); err != nil {
return admission.Errored(http.StatusInternalServerError, errors.Wrap(err, fmt.Sprintf("unable to decode into %T", object)))
}
fnInvoker := func(fn func(runtime.Object) handlers.AdmissionResponse) (patches []jsonpatch.JsonPatchOperation, err error) {
patch, err := fn(decodedObj)(ctx, req)
if err != nil {
return nil, err
}
if patch != nil {
patches = append(patches, patch...)
}
return patches, nil
}
var patches []jsonpatch.JsonPatchOperation
switch req.Operation {
case admissionv1.Create:
for _, routeHandler := range routeHandlers {
handlerPatches, err := fnInvoker(routeHandler.OnCreate)
if err != nil {
return admission.Denied(err.Error())
}
patches = append(patches, handlerPatches...)
}
case admissionv1.Update:
if err := h.decoder.DecodeRaw(req.OldObject, oldDecodedObj); err != nil {
return admission.Errored(http.StatusInternalServerError, errors.Wrap(err, fmt.Sprintf("unable to decode old object into %T", object)))
}
for _, routeHandler := range routeHandlers {
handlerPatches, err := routeHandler.OnUpdate(decodedObj, oldDecodedObj)(ctx, req)
if err != nil {
return admission.Denied(err.Error())
}
patches = append(patches, handlerPatches...)
}
case admissionv1.Delete:
for _, routeHandler := range routeHandlers {
handlerPatches, err := fnInvoker(routeHandler.OnDelete)
if err != nil {
return admission.Denied(err.Error())
}
patches = append(patches, handlerPatches...)
}
case admissionv1.Connect:
break
}
if len(patches) > 0 {
return admission.Patched("patching required", patches...)
}
return admission.Allowed(fmt.Sprintf("%s operation allowed", strings.ToLower(string(req.Operation))))
}
}
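
A concrete handler makes the chaining contract clearer: each On* hook returns an AdmissionResponse closure, a nil error lets the chain continue, and a non-nil error short-circuits into admission.Denied. Below is a hypothetical handler, not part of this changeset, satisfying the handlers.Handler interface introduced further down.

// A hypothetical handler that allows creations and updates untouched and
// rejects deletions; the DenyDelete type name is illustrative only.
package handlers

import (
    "context"
    "fmt"

    "gomodules.xyz/jsonpatch/v2"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type DenyDelete struct{}

func (DenyDelete) OnCreate(runtime.Object) AdmissionResponse {
    return func(context.Context, admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
        return nil, nil // no patches, no error: the chainer allows the request
    }
}

func (DenyDelete) OnUpdate(runtime.Object, runtime.Object) AdmissionResponse {
    return func(context.Context, admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
        return nil, nil
    }
}

func (DenyDelete) OnDelete(runtime.Object) AdmissionResponse {
    return func(_ context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
        // Returning an error makes the chainer deny the admission request.
        return nil, fmt.Errorf("%s cannot be deleted", req.Name)
    }
}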

View File

@@ -1,29 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package webhook
import (
"context"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
const (
deniedMessage = "the current Control Plane is in freezing mode due to a maintenance mode, all the changes are blocked: " +
"removing the webhook may lead to an inconsistent state upon its completion"
)
type Freeze struct{}
func (f *Freeze) Handle(context.Context, admission.Request) admission.Response {
return admission.Denied(deniedMessage)
}
func (f *Freeze) SetupWithManager(mgr controllerruntime.Manager) error {
mgr.GetWebhookServer().Register("/migrate", &webhook.Admission{Handler: f})
return nil
}

View File

@@ -0,0 +1,57 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type DataStoreSecretValidation struct {
Client client.Client
}
func (d DataStoreSecretValidation) OnCreate(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (d DataStoreSecretValidation) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (d DataStoreSecretValidation) OnUpdate(object runtime.Object, _ runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
secret := object.(*corev1.Secret) //nolint:forcetypeassert
dsList := &kamajiv1alpha1.DataStoreList{}
if err := d.Client.List(ctx, dsList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(kamajiv1alpha1.DatastoreUsedSecretNamespacedNameKey, fmt.Sprintf("%s/%s", secret.GetNamespace(), secret.GetName()))}); err != nil {
return nil, errors.Wrap(err, "cannot list Tenant Control Plane using the provided Secret")
}
if len(dsList.Items) > 0 {
var res []string
for _, ds := range dsList.Items {
res = append(res, ds.GetName())
}
return nil, fmt.Errorf("the Secret is used by the following DataStores and cannot be deleted (%s)", strings.Join(res, ", "))
}
return nil, nil
}
}

View File

@@ -0,0 +1,139 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
type DataStoreValidation struct {
Client client.Client
}
func (d DataStoreValidation) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
ds := object.(*kamajiv1alpha1.DataStore) //nolint:forcetypeassert
return nil, d.validate(ctx, *ds)
}
}
func (d DataStoreValidation) OnDelete(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
ds := object.(*kamajiv1alpha1.DataStore) //nolint:forcetypeassert
tcpList := &kamajiv1alpha1.TenantControlPlaneList{}
if err := d.Client.List(ctx, tcpList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName())}); err != nil {
return nil, errors.Wrap(err, "cannot retrieve TenantControlPlane list used by the DataStore")
}
if len(tcpList.Items) > 0 {
return nil, fmt.Errorf("the DataStore is still used by one or more TenantControlPlanes and cannot be removed")
}
return nil, nil
}
}
func (d DataStoreValidation) OnUpdate(object runtime.Object, oldObj runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
newDs, oldDs := object.(*kamajiv1alpha1.DataStore), oldObj.(*kamajiv1alpha1.DataStore) //nolint:forcetypeassert
if oldDs.Spec.Driver != newDs.Spec.Driver {
return nil, fmt.Errorf("driver of a DataStore cannot be changed")
}
return nil, d.validate(ctx, *newDs)
}
}
func (d DataStoreValidation) validate(ctx context.Context, ds kamajiv1alpha1.DataStore) error {
if ds.Spec.BasicAuth != nil {
if err := d.validateBasicAuth(ctx, ds); err != nil {
return err
}
}
if err := d.validateTLSConfig(ctx, ds); err != nil {
return err
}
return nil
}
func (d DataStoreValidation) validateBasicAuth(ctx context.Context, ds kamajiv1alpha1.DataStore) error {
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Password); err != nil {
return fmt.Errorf("basic-auth password is not valid, %w", err)
}
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Username); err != nil {
return fmt.Errorf("basic-auth username is not valid, %w", err)
}
return nil
}
func (d DataStoreValidation) validateTLSConfig(ctx context.Context, ds kamajiv1alpha1.DataStore) error {
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.CertificateAuthority.Certificate); err != nil {
return fmt.Errorf("CA certificate is not valid, %w", err)
}
if ds.Spec.Driver == kamajiv1alpha1.EtcdDriver {
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey == nil {
return fmt.Errorf("CA private key is required when using the etcd driver")
}
}
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil {
if err := d.validateContentReference(ctx, *ds.Spec.TLSConfig.CertificateAuthority.PrivateKey); err != nil {
return fmt.Errorf("CA private key is not valid, %w", err)
}
}
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.Certificate); err != nil {
return fmt.Errorf("client certificate is not valid, %w", err)
}
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.PrivateKey); err != nil {
return fmt.Errorf("client private key is not valid, %w", err)
}
return nil
}
func (d DataStoreValidation) validateContentReference(ctx context.Context, ref kamajiv1alpha1.ContentRef) error {
switch {
case len(ref.Content) > 0:
return nil
case ref.SecretRef == nil:
return fmt.Errorf("the Secret reference is mandatory when bare content is not specified")
case len(ref.SecretRef.SecretReference.Name) == 0:
return fmt.Errorf("the Secret reference name is mandatory")
case len(ref.SecretRef.SecretReference.Namespace) == 0:
return fmt.Errorf("the Secret reference namespace is mandatory")
}
if err := d.Client.Get(ctx, types.NamespacedName{Name: ref.SecretRef.SecretReference.Name, Namespace: ref.SecretRef.SecretReference.Namespace}, &corev1.Secret{}); err != nil {
if k8serrors.IsNotFound(err) {
return fmt.Errorf("secret %s/%s is not found", ref.SecretRef.SecretReference.Namespace, ref.SecretRef.SecretReference.Name)
}
return err
}
return nil
}

View File

@@ -0,0 +1,32 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
type Freeze struct{}
func (f Freeze) OnCreate(runtime.Object) AdmissionResponse {
return f.response
}
func (f Freeze) OnDelete(runtime.Object) AdmissionResponse {
return f.response
}
func (f Freeze) OnUpdate(runtime.Object, runtime.Object) AdmissionResponse {
return f.response
}
func (f Freeze) response(context.Context, admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
return nil, fmt.Errorf("the current Control Plane is frozen due to ongoing maintenance and all changes are blocked: " +
"removing the webhook may lead to an inconsistent state upon its completion")
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
type AdmissionResponse func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error)
type Handler interface {
OnCreate(runtime.Object) AdmissionResponse
OnDelete(runtime.Object) AdmissionResponse
OnUpdate(newObject runtime.Object, prevObject runtime.Object) AdmissionResponse
}

View File

@@ -0,0 +1,55 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"gomodules.xyz/jsonpatch/v2"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type TenantControlPlaneDataStore struct {
Client client.Client
}
func (t TenantControlPlaneDataStore) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
return nil, t.check(ctx, tcp.Spec.DataStore)
}
}
func (t TenantControlPlaneDataStore) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneDataStore) OnUpdate(object runtime.Object, _ runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
return nil, t.check(ctx, tcp.Spec.DataStore)
}
}
func (t TenantControlPlaneDataStore) check(ctx context.Context, dataStoreName string) error {
if err := t.Client.Get(ctx, types.NamespacedName{Name: dataStoreName}, &kamajiv1alpha1.DataStore{}); err != nil {
if k8serrors.IsNotFound(err) {
return fmt.Errorf("%s DataStore does not exist", dataStoreName)
}
return fmt.Errorf("an unexpected error occurred upon Tenant Control Plane DataStore check, %w", err)
}
return nil
}

View File

@@ -0,0 +1,60 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type TenantControlPlaneDefaults struct {
DefaultDatastore string
}
func (t TenantControlPlaneDefaults) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
if len(tcp.Spec.DataStore) == 0 {
operations, err := utils.JSONPatch(tcp, func() {
tcp.Spec.DataStore = t.DefaultDatastore
})
if err != nil {
return nil, errors.Wrap(err, "cannot create patch responses upon Tenant Control Plane creation")
}
return operations, nil
}
return nil, nil
}
}
func (t TenantControlPlaneDefaults) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneDefaults) OnUpdate(object runtime.Object, oldObject runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
newTCP, oldTCP := object.(*kamajiv1alpha1.TenantControlPlane), oldObject.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
if oldTCP.Spec.DataStore == newTCP.Spec.DataStore {
return nil, nil
}
if len(newTCP.Spec.DataStore) == 0 {
return nil, fmt.Errorf("DataStore is a required field")
}
return nil, nil
}
}

View File

@@ -0,0 +1,97 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
appsv1 "k8s.io/api/apps/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/builders/controlplane"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type TenantControlPlaneDeployment struct {
Client client.Client
DeploymentBuilder controlplane.Deployment
KonnectivityBuilder controlplane.Konnectivity
}
func (t TenantControlPlaneDeployment) OnCreate(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneDeployment) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneDeployment) shouldTriggerCheck(newTCP, oldTCP kamajiv1alpha1.TenantControlPlane) bool {
if newTCP.Spec.ControlPlane.Deployment.AdditionalVolumeMounts == nil &&
len(newTCP.Spec.ControlPlane.Deployment.AdditionalInitContainers) == 0 &&
len(newTCP.Spec.ControlPlane.Deployment.AdditionalContainers) == 0 &&
len(newTCP.Spec.ControlPlane.Deployment.AdditionalVolumes) == 0 {
return false
}
if newTCP.Spec.ControlPlane.Deployment.AdditionalVolumeMounts != nil && oldTCP.Spec.ControlPlane.Deployment.AdditionalVolumeMounts == nil {
return true
}
return !cmp.Equal(newTCP.Spec.ControlPlane.Deployment.AdditionalContainers, oldTCP.Spec.ControlPlane.Deployment.AdditionalContainers) ||
!cmp.Equal(newTCP.Spec.ControlPlane.Deployment.AdditionalInitContainers, oldTCP.Spec.ControlPlane.Deployment.AdditionalInitContainers) ||
!cmp.Equal(newTCP.Spec.ControlPlane.Deployment.AdditionalVolumes, oldTCP.Spec.ControlPlane.Deployment.AdditionalVolumes) ||
!cmp.Equal(newTCP.Spec.ControlPlane.Deployment.AdditionalVolumeMounts, oldTCP.Spec.ControlPlane.Deployment.AdditionalVolumeMounts)
}
func (t TenantControlPlaneDeployment) OnUpdate(newObject runtime.Object, oldObject runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp, previousTCP := newObject.(*kamajiv1alpha1.TenantControlPlane), oldObject.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
if !t.shouldTriggerCheck(*tcp, *previousTCP) {
return nil, nil
}
ds := kamajiv1alpha1.DataStore{}
if err := t.Client.Get(ctx, types.NamespacedName{Name: tcp.Spec.DataStore}, &ds); err != nil {
return nil, err
}
t.DeploymentBuilder.DataStore = ds
deployment := appsv1.Deployment{}
deployment.Name = tcp.Name
deployment.Namespace = tcp.Namespace
err := t.Client.Get(ctx, types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, &deployment)
if err != nil && !k8serrors.IsNotFound(err) {
return nil, nil
}
t.DeploymentBuilder.Build(ctx, &deployment, *tcp)
if tcp.Spec.Addons.Konnectivity != nil {
t.KonnectivityBuilder.Build(&deployment, *tcp)
}
if k8serrors.IsNotFound(err) {
err = t.Client.Create(ctx, &deployment, client.DryRunAll)
} else {
err = t.Client.Update(ctx, &deployment, client.DryRunAll)
}
if err != nil {
return nil, errors.Wrap(err, "the resulting Deployment will generate a configuration error, cannot proceed")
}
return nil, nil
}
}
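
The update path above validates the additional containers, init containers, and volumes by assembling the would-be Deployment and submitting it with client.DryRunAll, so the API server runs full schema and admission validation without persisting anything. A minimal sketch of the same dry-run technique in isolation; the Deployment content and cluster wiring are illustrative:

package main

import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    // Assumes a reachable cluster via the usual kubeconfig resolution.
    c, err := client.New(config.GetConfigOrDie(), client.Options{})
    if err != nil {
        panic(err)
    }

    deployment := appsv1.Deployment{}
    deployment.Name = "probe"
    deployment.Namespace = "default"
    // An empty pod template is invalid: the API server rejects it during
    // the dry-run, but nothing is ever persisted to etcd.
    if err := c.Create(context.Background(), &deployment, client.DryRunAll); err != nil {
        fmt.Println("rejected by dry-run:", err)
    }
}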

View File

@@ -0,0 +1,53 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type TenantControlPlaneKubeletAddresses struct{}
func (t TenantControlPlaneKubeletAddresses) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
return nil, t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes)
}
}
func (t TenantControlPlaneKubeletAddresses) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneKubeletAddresses) OnUpdate(object runtime.Object, _ runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
return nil, t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes)
}
}
func (t TenantControlPlaneKubeletAddresses) validatePreferredKubeletAddressTypes(addressTypes []kamajiv1alpha1.KubeletPreferredAddressType) error {
s := sets.New[string]()
for _, at := range addressTypes {
if s.Has(string(at)) {
return fmt.Errorf("preferred kubelet address type %s is stated multiple times", at)
}
s.Insert(string(at))
}
return nil
}

View File

@@ -0,0 +1,88 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package handlers
import (
"context"
"fmt"
"strings"
"github.com/blang/semver"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/upgrade"
"github.com/clastix/kamaji/internal/webhook/utils"
)
type TenantControlPlaneVersion struct{}
func (t TenantControlPlaneVersion) OnCreate(object runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
tcp := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
ver, err := semver.New(t.normalizeKubernetesVersion(tcp.Spec.Kubernetes.Version))
if err != nil {
return nil, errors.Wrap(err, "unable to parse the desired Kubernetes version")
}
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
if supportedErr != nil {
return nil, errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
}
if ver.GT(supportedVer) {
return nil, fmt.Errorf("unable to create a TenantControlPlane with a Kubernetes version greater than the supported one (%s)", supportedVer.String())
}
return nil, nil
}
}
func (t TenantControlPlaneVersion) normalizeKubernetesVersion(input string) string {
if strings.HasPrefix(input, "v") {
return strings.Replace(input, "v", "", 1)
}
return input
}
func (t TenantControlPlaneVersion) OnDelete(runtime.Object) AdmissionResponse {
return utils.NilOp()
}
func (t TenantControlPlaneVersion) OnUpdate(object runtime.Object, oldObject runtime.Object) AdmissionResponse {
return func(ctx context.Context, req admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
newTCP, oldTCP := object.(*kamajiv1alpha1.TenantControlPlane), oldObject.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
oldVer, oldErr := semver.Make(t.normalizeKubernetesVersion(oldTCP.Spec.Kubernetes.Version))
if oldErr != nil {
return nil, errors.Wrap(oldErr, "unable to parse the previous Kubernetes version")
}
newVer, newErr := semver.New(t.normalizeKubernetesVersion(newTCP.Spec.Kubernetes.Version))
if newErr != nil {
return nil, errors.Wrap(newErr, "unable to parse the desired Kubernetes version")
}
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
if supportedErr != nil {
return nil, errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
}
switch {
case newVer.GT(supportedVer):
return nil, fmt.Errorf("unable to upgrade to a version greater than the supported one (%s)", supportedVer.String())
case newVer.LT(oldVer):
return nil, fmt.Errorf("unable to downgrade a TenantControlPlane from %s to %s", oldVer.String(), newVer.String())
case newVer.Minor-oldVer.Minor > 1:
return nil, fmt.Errorf("unable to skip minor versions, upgrades must be sequential")
}
return nil, nil
}
}
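
The upgrade gates above can be exercised in isolation: no version newer than the bundled kubeadm, no downgrade, no minor-version skip. A small sketch using the same blang/semver module, with the supported version hard-coded purely for illustration (the real value comes from upgrade.KubeadmVersion):

package main

import (
    "fmt"

    "github.com/blang/semver"
)

// supported stands in for upgrade.KubeadmVersion; the value is illustrative.
const supported = "1.27.2"

func checkUpgrade(from, to string) error {
    oldVer, _ := semver.Make(from)
    newVer, _ := semver.Make(to)
    supportedVer, _ := semver.Make(supported)

    switch {
    case newVer.GT(supportedVer):
        return fmt.Errorf("%s is newer than the supported %s", to, supported)
    case newVer.LT(oldVer):
        return fmt.Errorf("downgrade from %s to %s is not allowed", from, to)
    case newVer.Minor-oldVer.Minor > 1:
        return fmt.Errorf("minor upgrades must be sequential: %s to %s skips a release", from, to)
    }
    return nil
}

func main() {
    fmt.Println(checkUpgrade("1.25.0", "1.26.3")) // <nil>: one minor at a time is fine
    fmt.Println(checkUpgrade("1.24.0", "1.26.0")) // error: skips 1.25
    fmt.Println(checkUpgrade("1.26.0", "1.25.0")) // error: downgrade
}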

View File

@@ -0,0 +1,36 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package webhook
import (
"github.com/pkg/errors"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
webhookhandlers "github.com/clastix/kamaji/internal/webhook/handlers"
webhookroutes "github.com/clastix/kamaji/internal/webhook/routes"
)
func Register(mgr controllerruntime.Manager, routes map[webhookroutes.Route][]webhookhandlers.Handler) error {
srv := mgr.GetWebhookServer()
decoder, err := admission.NewDecoder(mgr.GetScheme())
if err != nil {
return errors.Wrap(err, "unable to create NewDecoder for webhook registration")
}
chainer := handlersChainer{
decoder: decoder,
}
for route, handlers := range routes {
srv.Register(route.GetPath(), &webhook.Admission{
Handler: chainer.Handler(route.GetObject(), handlers...),
RecoverPanic: true,
})
}
return nil
}
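
Register deliberately leaves the route-to-handlers wiring to the caller. A hypothetical wiring at manager setup time, under the assumption that mgr is an already-constructed controller-runtime Manager; the exact chains Kamaji registers may differ from this sketch:

// setupWebhooks shows an illustrative route-to-handlers wiring using only
// the routes and handlers introduced in this changeset.
package main

import (
    controllerruntime "sigs.k8s.io/controller-runtime"

    "github.com/clastix/kamaji/internal/webhook"
    "github.com/clastix/kamaji/internal/webhook/handlers"
    "github.com/clastix/kamaji/internal/webhook/routes"
)

func setupWebhooks(mgr controllerruntime.Manager) error {
    return webhook.Register(mgr, map[routes.Route][]handlers.Handler{
        routes.TenantControlPlaneDefaults{}: {
            handlers.TenantControlPlaneDefaults{DefaultDatastore: "default"},
        },
        routes.TenantControlPlaneValidate{}: {
            handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
            handlers.TenantControlPlaneKubeletAddresses{},
            handlers.TenantControlPlaneVersion{},
        },
        routes.DataStoreValidate{}: {
            handlers.DataStoreValidation{Client: mgr.GetClient()},
        },
        routes.DataStoreSecrets{}: {
            handlers.DataStoreSecretValidation{Client: mgr.GetClient()},
        },
    })
}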

View File

@@ -0,0 +1,21 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
//+kubebuilder:webhook:path=/validate--v1-secret,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=secrets,verbs=delete,versions=v1,name=vdatastoresecrets.kb.io,admissionReviewVersions=v1
type DataStoreSecrets struct{}
func (d DataStoreSecrets) GetPath() string {
return "/validate--v1-secret"
}
func (d DataStoreSecrets) GetObject() runtime.Object {
return &corev1.Secret{}
}

View File

@@ -0,0 +1,22 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
"k8s.io/apimachinery/pkg/runtime"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-datastore,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update;delete,versions=v1alpha1,name=vdatastore.kb.io,admissionReviewVersions=v1
type DataStoreValidate struct{}
func (d DataStoreValidate) GetPath() string {
return "/validate-kamaji-clastix-io-v1alpha1-datastore"
}
func (d DataStoreValidate) GetObject() runtime.Object {
return &kamajiv1alpha1.DataStore{}
}

View File

@@ -0,0 +1,13 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
"k8s.io/apimachinery/pkg/runtime"
)
type Route interface {
GetPath() string
GetObject() runtime.Object
}

View File

@@ -0,0 +1,22 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
"k8s.io/apimachinery/pkg/runtime"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=mtenantcontrolplane.kb.io,admissionReviewVersions=v1
type TenantControlPlaneDefaults struct{}
func (t TenantControlPlaneDefaults) GetObject() runtime.Object {
return &kamajiv1alpha1.TenantControlPlane{}
}
func (t TenantControlPlaneDefaults) GetPath() string {
return "/mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane"
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
"k8s.io/apimachinery/pkg/runtime"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
type TenantControlPlaneMigrate struct{}
func (t TenantControlPlaneMigrate) GetPath() string {
return "/migrate"
}
func (t TenantControlPlaneMigrate) GetObject() runtime.Object {
return &kamajiv1alpha1.TenantControlPlane{}
}

View File

@@ -0,0 +1,22 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package routes
import (
"k8s.io/apimachinery/pkg/runtime"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=vtenantcontrolplane.kb.io,admissionReviewVersions=v1
type TenantControlPlaneValidate struct{}
func (t TenantControlPlaneValidate) GetPath() string {
return "/validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane"
}
func (t TenantControlPlaneValidate) GetObject() runtime.Object {
return &kamajiv1alpha1.TenantControlPlane{}
}

View File

@@ -0,0 +1,27 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package utils
import (
json "github.com/json-iterator/go"
"github.com/pkg/errors"
"gomodules.xyz/jsonpatch/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func JSONPatch(obj client.Object, modifierFunc func()) ([]jsonpatch.Operation, error) {
original, err := json.Marshal(obj)
if err != nil {
return nil, errors.Wrap(err, "cannot marshal input object")
}
modifierFunc()
patched, err := json.Marshal(obj)
if err != nil {
return nil, errors.Wrap(err, "cannot marshal patched object")
}
return jsonpatch.CreatePatch(original, patched)
}
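
JSONPatch derives RFC 6902 operations by marshalling the object before and after the mutation closure and diffing the two payloads; this is what lets TenantControlPlaneDefaults emit its defaulting patch. A standalone sketch of the same idea over a trivial struct, with encoding/json standing in for the json-iterator import used above:

package main

import (
    "encoding/json"
    "fmt"

    "gomodules.xyz/jsonpatch/v2"
)

type spec struct {
    DataStore string `json:"dataStore,omitempty"`
}

// jsonPatch mirrors the marshal-mutate-marshal approach: serialize the
// object, apply the in-place mutation, serialize again, then diff.
func jsonPatch(obj *spec, mutate func()) ([]jsonpatch.Operation, error) {
    original, err := json.Marshal(obj)
    if err != nil {
        return nil, err
    }
    mutate()
    patched, err := json.Marshal(obj)
    if err != nil {
        return nil, err
    }
    return jsonpatch.CreatePatch(original, patched)
}

func main() {
    s := &spec{}
    ops, _ := jsonPatch(s, func() { s.DataStore = "default" })
    for _, op := range ops {
        fmt.Println(op.Operation, op.Path, op.Value) // add /dataStore default
    }
}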

View File

@@ -0,0 +1,17 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"context"
"gomodules.xyz/jsonpatch/v2"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
func NilOp() func(context.Context, admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
return func(context.Context, admission.Request) ([]jsonpatch.JsonPatchOperation, error) {
return nil, nil
}
}