Compare commits

...

48 Commits

Author SHA1 Message Date
Dario Tranchitella
5bc1d45fbf chore(gh): trigger helm release on tag 2022-08-23 11:17:19 +02:00
Dario Tranchitella
968b343db9 release(helm): 0.1.0 2022-08-23 11:08:26 +02:00
Dario Tranchitella
c3b72c8a3b chore(gh): helm chart has a new directory 2022-08-23 11:08:26 +02:00
Dario Tranchitella
f64cf284de chore(helm): moving to charts folder 2022-08-23 11:08:26 +02:00
Dario Tranchitella
e12509a970 feat(helm): publishing chart 2022-08-23 11:08:26 +02:00
Dario Tranchitella
626a0eed64 docs: postgresql support using kine 2022-08-23 08:48:56 +02:00
Dario Tranchitella
c32a890561 reorg(docs): ingress is no more mandatory 2022-08-23 08:48:56 +02:00
Dario Tranchitella
2eca4b0eeb refactor(test): ingress is no more required 2022-08-23 08:48:56 +02:00
Dario Tranchitella
47fc9fdc63 refactor: ingress is optional 2022-08-23 08:48:56 +02:00
Dario Tranchitella
08022fc780 refactor(helm)!: making ingress optional 2022-08-23 08:48:56 +02:00
Dario Tranchitella
f6d330992d refactor(kustomize)!: making ingress optional 2022-08-23 08:48:56 +02:00
Dario Tranchitella
c449b01121 refactor(api)!: making ingress optional 2022-08-23 08:48:56 +02:00
Dario Tranchitella
1dcafb91d4 refactor(helm)!: removing ingress from status when unused 2022-08-23 08:48:56 +02:00
Dario Tranchitella
766105b50e refactor(kustomize)!: removing ingress from status when unused 2022-08-23 08:48:56 +02:00
Dario Tranchitella
9d5b14f440 refactor(api)!: removing ingress from status when unused 2022-08-23 08:48:56 +02:00
Dario Tranchitella
0954ae7494 feat: checksum for status 2022-08-23 08:48:56 +02:00
Dario Tranchitella
ec55b203a8 feat(helm)!: checksum for status 2022-08-23 08:48:56 +02:00
Dario Tranchitella
d6c65cfbe6 feat(yaml)!: checksum for status 2022-08-23 08:48:56 +02:00
Dario Tranchitella
99c3b47ec9 feat(api)!: checksum for status 2022-08-23 08:48:56 +02:00
Dario Tranchitella
31b25f7c78 feat: postgresql kine driver 2022-08-23 08:48:56 +02:00
Dario Tranchitella
06504e7133 build(yaml)!: aligning to postgresql driver changes 2022-08-23 08:48:56 +02:00
Dario Tranchitella
4e993b7402 feat(helm)!: support for postgresql kine driver 2022-08-23 08:48:56 +02:00
Dario Tranchitella
ebdb24ce45 feat(api)!: support for postgresql kine driver 2022-08-23 08:48:56 +02:00
Dario Tranchitella
f85ddb1808 docs: refactoring documentation for kine integration 2022-08-23 08:48:56 +02:00
Dario Tranchitella
2b7143294b docs: documenting kine-postgresql 2022-08-23 08:48:56 +02:00
Dario Tranchitella
a57f0edd33 chore: support for postgresql as kine backend 2022-08-23 08:48:56 +02:00
Dario Tranchitella
887655c077 refactor(docs): moving docs in the kine folder 2022-08-23 08:48:56 +02:00
Dario Tranchitella
f1b1802cf0 docs: documenting the kine-mysql feature as datastore 2022-08-23 08:48:56 +02:00
Dario Tranchitella
60c187464d chore: making kine-mariadb make idempotent 2022-08-23 08:48:56 +02:00
Dario Tranchitella
3974214d6a fix: ensuring idempotency when kine is enabled 2022-08-23 08:48:56 +02:00
bsctl
f9c776bda5 fix(helm): peer port instead of client port 2022-08-18 09:48:51 +02:00
Dario Tranchitella
1d6be44edf refactor(etcd): using helm functions for endpoints and initial peers, along with ports 2022-08-17 19:51:44 +02:00
bsctl
dce6c1330a fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
922324a71b fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
5da7058ed3 fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
f7483dcb01 fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
e4227e1c81 fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
38ba07f3a4 fix(docs): update guides to latest changes 2022-08-17 16:24:48 +02:00
bsctl
5e5bd83b69 fix(helm): readme 2022-08-16 10:16:17 +02:00
bsctl
97a05b5f49 feat(helm): improve pvc management 2022-08-16 10:16:17 +02:00
bsctl
502cb64a5c fix(helm): restore etcd endpoints 2022-08-16 10:16:17 +02:00
bsctl
c3ad545c97 fix(helm): improve chart documentation 2022-08-16 10:16:17 +02:00
bsctl
0c634d1c16 feat(helm): improve liveness probe for etcd 2022-08-16 10:16:17 +02:00
Viktor Oreshkin
cef05c3310 fix(docs): update link to Capsule in README
Signed-off-by: Viktor Oreshkin <imselfish@stek29.rocks>
2022-08-14 09:04:46 +02:00
bsctl
dce027b8f7 fix(cloudinit): set SystemdCgroup 2022-08-13 07:44:05 +02:00
Dario Tranchitella
d1871cf378 fix(docs): aligning to latest konnectivity changes 2022-07-26 17:17:10 +02:00
Dario Tranchitella
a573805bad chore(kind): using kindest/node base image for local testing 2022-07-26 17:17:10 +02:00
Dario Tranchitella
6a4baf99fc fix: missing toleration for kube-proxy addon 2022-07-26 16:42:35 +02:00
112 changed files with 2154 additions and 1953 deletions

View File

@@ -3,7 +3,7 @@ name: Helm Chart
on:
push:
branches: [ "*" ]
tags: [ "helm-v" ]
tags: [ "helm-v*" ]
pull_request:
branches: [ "*" ]
@@ -16,7 +16,7 @@ jobs:
with:
version: 3.3.4
- name: Linting Chart
run: helm lint ./helm/kamaji
run: helm lint ./charts/kamaji
release:
if: startsWith(github.ref, 'refs/tags/helm-v')
runs-on: ubuntu-latest
@@ -26,7 +26,7 @@ jobs:
uses: stefanprodan/helm-gh-pages@master
with:
token: ${{ secrets.BOT_GITHUB_TOKEN }}
charts_dir: helm
charts_dir: charts
charts_url: https://clastix.github.io/charts
owner: clastix
repository: charts
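With this change, a chart release is published by pushing a tag matching the `helm-v*` pattern, e.g. (illustrative, consistent with the `release(helm): 0.1.0` commit above):

git tag helm-v0.1.0
git push origin helm-v0.1.0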

View File

@@ -97,7 +97,7 @@ kustomize: ## Download kustomize locally if necessary.
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
cp config/crd/bases/kamaji.clastix.io_tenantcontrolplanes.yaml helm/kamaji/crds/tenantcontrolplane.yaml
cp config/crd/bases/kamaji.clastix.io_tenantcontrolplanes.yaml charts/kamaji/crds/tenantcontrolplane.yaml
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
@@ -244,5 +244,5 @@ env:
.PHONY: e2e
e2e: env load helm ginkgo ## Create a KinD cluster, install Kamaji on it and run the test suite.
$(HELM) upgrade --debug --install kamaji ./helm/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
$(GINKGO) -v ./e2e

README.md (108 changes)
View File

@@ -8,25 +8,26 @@
</a>
</p>
**Kamaji** is a tool aimed to build and operate a **Managed Kubernetes Service** with a fraction of the operational burden. With **Kamaji**, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scale cloud provider.
**Kamaji** deploys and operates **Kubernetes** at scale with a fraction of the operational burden.
<p align="center" style="padding: 6px 6px">
<img src="assets/kamaji-logo.png" />
</p>
> This project is still in the early development stage which means it's not ready for production as APIs, commands, flags, etc. are subject to change, but also that your feedback can still help to shape it. Please try it out and let us know what you like, dislike, what works, what doesn't, etc.
## Why are we building it?
Global hyper-scalers are leading the Managed Kubernetes space, while regional cloud providers, as well as large corporations, are struggling to offer the same level of experience to their developers because of the lack of the right tools. Also, Kubernetes solutions for the on-premises are designed with an enterprise-first approach and they are too costly when deployed at scale. Project Kamaji aims to solve this pain by leveraging multi-tenancy and simplifying how to run Kubernetes clusters at scale with a fraction of the operational burden.
Global hyper-scalers are leading the Managed Kubernetes space, while other cloud providers, as well as large corporations, are struggling to offer the same experience to their DevOps teams because of the lack of the right tools. Also, current Kubernetes solutions are mainly designed with an enterprise-first approach and they are too costly when deployed at scale.
**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
## How it works
Kamaji turns any CNCF conformant Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters we're calling _“tenant clusters”_. As with every Kubernetes cluster, a tenant cluster has a set of nodes and a control plane, composed of several components: `APIs server`, `scheduler`, `controller manager`. What Kamaji does is deploy those components as a pod running in the admin cluster.
And what about the tenant worker nodes? They are just worker nodes: regular instances, e.g. virtual or bare metal, connecting to the APIs server of the tenant cluster. Kamaji's goal is to manage the lifecycle of hundreds of these clusters, not only one, so how can we add another tenant cluster? As you could expect, Kamaji just deploys a new tenant control plane as a new pod in the admin cluster, and then it joins the new tenant worker nodes.
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Core Concepts](./docs/concepts.md) for a deeper understanding of principles behind Kamaji's design.
<p align="center">
<img src="assets/kamaji-light.png" />
<img src="assets/kamaji-light.png#gh-light-mode-only" />
</p>
<p align="center">
<img src="assets/kamaji-dark.png#gh-dark-mode-only" />
</p>
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
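To make the model concrete, here is a hypothetical minimal `TenantControlPlane` manifest (the API group comes from the CRD file in this changeset; the `v1alpha1` version and the field values are assumptions — consult the chart's CRDs for the authoritative schema):

```yaml
# Hypothetical minimal TenantControlPlane. The ingress section is omitted,
# which this changeset makes possible by turning the ingress fields optional.
apiVersion: kamaji.clastix.io/v1alpha1   # group per kamaji.clastix.io_tenantcontrolplanes.yaml
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: tenants
spec:
  kubernetes:
    version: v1.23.4                     # illustrative tenant Kubernetes version
  controlPlane:
    service:
      serviceType: LoadBalancer          # assumption: how the tenant API server is exposed
```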
@@ -35,69 +36,48 @@ All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes cl
<img src="assets/screenshot.png" />
</p>
## Save the state
Putting the tenant cluster control plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state to be able to store and retrieve data.
A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time. As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we deploy a multi-tenant `etcd` cluster storing the state of multiple tenant clusters.
With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that we have to operate an external `etcd` cluster and manage access to be sure that each tenant cluster uses only its own data. Also, there are size limits in `etcd`, defaulted to 2GB and configurable to a maximum of 8GB. We're solving this issue by pooling multiple `etcd` clusters and sharding the tenant control planes.
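For reference, the 8GB ceiling mentioned above matches the backend quota that the bundled `etcd` StatefulSet later in this changeset sets explicitly (8589934592 bytes = 8 GiB):

```yaml
# etcd backend quota flag, as set in the chart's etcd StatefulSet args:
- --quota-backend-bytes=8589934592
```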
## Getting started
Please refer to the [Getting Started guide](./docs/getting-started-with-kamaji.md) to deploy a minimal setup of Kamaji on KinD.
> This project is still in the early development stage which means it's not ready for production as APIs, commands, flags, etc. are subject to change, but also that your feedback can still help to shape it. Please try it out and let us know what you like, dislike, what works, what doesn't, etc.
## Use cases
Kamaji project has been initially started as a solution for actual and common problems such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it can open a wider range of use cases. Here are a few:
The Kamaji project was initially started as a solution to actual and common problems, such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it can open up a wider range of use cases.
### Managed Kubernetes
Enabling companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize clusters management, monitoring, and observability by leaving developers to focus on the applications, increase productivity and reduce operational costs.
Here are a few:
### Kubernetes as a Service
Provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures and cost centers with the option of Bring Your Own Device - BYOD.
### Control Plane as a Service
Provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespaces based isolation often still need access to cluster wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
### Edge Computing
Distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
### Cluster Simulation
Check new Kubernetes API or experimental flag or a new tool without impacting production operations. Kamaji will let you simulate such things in a safe and controlled environment.
### Workloads Testing
Check the behaviour of your workloads on different and multiple versions of Kubernetes with ease by deploying multiple Control Planes in a single cluster.
- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize cluster management, monitoring, and observability while leaving developers free to focus on applications, increasing productivity and reducing operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures, with the option of Bring Your Own Device, BYOD.
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespace-based isolation often still need access to cluster-wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** check a new Kubernetes API, experimental flag, or tool without impacting production operations. Kamaji lets you simulate such things in a safe and controlled environment.
- **Workloads Testing:** check the behaviour of your workloads on different and multiple versions of Kubernetes with ease by deploying multiple Control Planes in a single cluster.
## Features
### Self Service Kubernetes
Leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
### Multi-cluster Management
Centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
### Cheaper Control Planes
Place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
### Stronger Multi-Tenancy
Leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
### Kubernetes Inception
Use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
### Full APIs compliant
Tenant clusters are fully CNCF compliant built with upstream Kubernetes binaries. A client does not see differences between a Kamaji provisioned cluster and a dedicated cluster.
- **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full APIs compliant:** tenant clusters are fully CNCF-compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji-provisioned cluster and a dedicated cluster.
## Roadmap
- [ ] Benchmarking and stress-test
- [x] Support for dynamic address allocation on native Load Balancer
- [x] Zero Downtime Tenant Control Plane upgrade
- [ ] `konnectivity` integration
- [x] `konnectivity` integration
- [ ] Provisioning of Tenant Control Plane through Cluster APIs
- [ ] Prometheus metrics for monitoring and alerting
- [ ] `kine` integration, i.e. use MySQL, SQLite, PostgreSQL as datastore
- [ ] Terraform provider
- [ ] Custom Prometheus metrics for monitoring and alerting
- [x] `kine` integration for MySQL as datastore
- [x] `kine` integration for PostgreSQL as datastore
- [ ] Deeper `kubeadm` integration
- [ ] `etcd` pooling
- [ ] Pooling of multiple `etcd` datastores
- [ ] Autoscaling of Tenant Control Plane pods
## Documentation
Please, check the project's [documentation](./docs/) for getting started with Kamaji.
@@ -108,27 +88,23 @@ Kamaji is Open Source with Apache 2 license and any contribution is welcome.
## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQ
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a tool you can install on top of any CNCF conformant Kubernetes to provide hundreds of managed Tenant clusters as a service. We tested Kamaji on vanilla Kubernetes 1.23+, KinD, and MS Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes vanilla clusters built with `kubeadm`.
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters, as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod?
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on a server. The same unchanged images of upstream `APIs server`, `scheduler`, `controller manager` are used.
A. Yes, the tenant control plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. And what about multi-tenant `etcd`? I never heard of it.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. Even if multi-tenant deployment for `etcd` is not a common practice, multi-tenancy, RBAC, and client authentication has been [supported](https://etcd.io/docs/v3.5/op-guide/authentication/) in `etcd` from a long time.
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants, keeping tenant namespaces isolated by policies. While such a solution is the right choice for balancing features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can provide cluster admin permissions to the tenant.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](capsule.clastix.io). Why does Kamaji matter?
Q. Well you convinced me, how to get a try?
A. Lighter Multi-Tenancy solutions, like Capsule shares the Kubernetes control plane among all tenants keeping tenant namespaces isolated by policies. While these solutions are the right choice by balancing between features and ease of usage, there are cases where a tenant user requires access to the control plane, for example, when a tenant requires to manage CRDs on his own. With Kamaji, you can provide admin permissions to the tenants.
Q. So I need a costly cloud infrastructure to try Kamaji?
A. No, it is possible to getting started Kamaji on your laptop with [KinD](./docs/getting-started-with-kamaji.md).
A. It is possible to get started with Kamaji on a laptop with [KinD](./docs/getting-started-with-kamaji.md) installed.

View File

@@ -16,31 +16,34 @@ import (
type APIServerCertificatesStatus struct {
SecretName string `json:"secretName,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
// ETCDCertificateStatus defines the observed state of ETCD Certificate for API server.
type ETCDCertificateStatus struct {
SecretName string `json:"secretName,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
// ETCDCertificatesStatus defines the observed state of ETCD Certificate for API server.
type ETCDCertificatesStatus struct {
APIServer ETCDCertificateStatus `json:"apiServer,omitempty"`
CA ETCDCertificateStatus `json:"ca,omitempty"`
APIServer APIServerCertificatesStatus `json:"apiServer,omitempty"`
CA ETCDCertificateStatus `json:"ca,omitempty"`
}
// CertificatePrivateKeyPairStatus defines the status.
type CertificatePrivateKeyPairStatus struct {
SecretName string `json:"secretName,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
SecretName string `json:"secretName,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
// PublicKeyPrivateKeyPairStatus defines the status.
type PublicKeyPrivateKeyPairStatus struct {
SecretName string `json:"secretName,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
// CertificatesStatus defines the observed state of ETCD Certificates.
@@ -61,24 +64,25 @@ type ETCDStatus struct {
}
type SQLCertificateStatus struct {
SecretName string `json:"secretName,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
SecretName string `json:"secretName,omitempty"`
Checksum string `json:"checksum,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
}
type SQLConfigStatus struct {
SecretName string `json:"secretName,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
SecretName string `json:"secretName,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
type SQLSetupStatus struct {
Schema string `json:"schema,omitempty"`
User string `json:"user,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
SQLConfigResourceVersion string `json:"sqlConfigResourceVersion,omitempty"`
Schema string `json:"schema,omitempty"`
User string `json:"user,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
type KineMySQLStatus struct {
type KineStatus struct {
Driver string `json:"driver,omitempty"`
Config SQLConfigStatus `json:"config,omitempty"`
Setup SQLSetupStatus `json:"setup,omitempty"`
Certificate SQLCertificateStatus `json:"certificate,omitempty"`
@@ -86,8 +90,8 @@ type KineMySQLStatus struct {
// StorageStatus defines the observed state of StorageStatus.
type StorageStatus struct {
ETCD *ETCDStatus `json:"etcd,omitempty"`
KineMySQL *KineMySQLStatus `json:"kineMySQL,omitempty"`
ETCD *ETCDStatus `json:"etcd,omitempty"`
Kine *KineStatus `json:"kine,omitempty"`
}
// KubeconfigStatus contains information about the generated kubeconfig.
@@ -128,22 +132,26 @@ type KubeadmPhasesStatus struct {
type ExternalKubernetesObjectStatus struct {
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
// Resource version of k8s object
RV string `json:"resourceVersion,omitempty"`
Checksum string `json:"checksum,omitempty"`
// Last time when k8s object was updated
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
}
// KonnectivityStatus defines the status of Konnectivity as Addon.
type KonnectivityStatus struct {
Enabled bool `json:"enabled"`
EgressSelectorConfiguration string `json:"egressSelectorConfiguration,omitempty"`
Certificate CertificatePrivateKeyPairStatus `json:"certificate,omitempty"`
Kubeconfig KubeconfigStatus `json:"kubeconfig,omitempty"`
ServiceAccount ExternalKubernetesObjectStatus `json:"sa,omitempty"`
ClusterRoleBinding ExternalKubernetesObjectStatus `json:"clusterrolebinding,omitempty"`
Agent ExternalKubernetesObjectStatus `json:"agent,omitempty"`
Service KubernetesServiceStatus `json:"service,omitempty"`
Enabled bool `json:"enabled"`
ConfigMap KonnectivityConfigMap `json:"configMap,omitempty"`
Certificate CertificatePrivateKeyPairStatus `json:"certificate,omitempty"`
Kubeconfig KubeconfigStatus `json:"kubeconfig,omitempty"`
ServiceAccount ExternalKubernetesObjectStatus `json:"sa,omitempty"`
ClusterRoleBinding ExternalKubernetesObjectStatus `json:"clusterrolebinding,omitempty"`
Agent ExternalKubernetesObjectStatus `json:"agent,omitempty"`
Service KubernetesServiceStatus `json:"service,omitempty"`
}
type KonnectivityConfigMap struct {
Name string `json:"name,omitempty"`
Checksum string `json:"checksum,omitempty"`
}
// AddonStatus defines the observed state of an Addon.
@@ -189,7 +197,7 @@ type KubernetesStatus struct {
Version KubernetesVersion `json:"version,omitempty"`
Deployment KubernetesDeploymentStatus `json:"deployment,omitempty"`
Service KubernetesServiceStatus `json:"service,omitempty"`
Ingress KubernetesIngressStatus `json:"ingress,omitempty"`
Ingress *KubernetesIngressStatus `json:"ingress,omitempty"`
}
// +kubebuilder:validation:Enum=Provisioning;Upgrading;Ready;NotReady
@@ -207,7 +215,7 @@ type KubernetesVersion struct {
Version string `json:"version,omitempty"`
// +kubebuilder:default=Provisioning
// Status returns the current status of the Kubernetes version, such as its provisioning state, or completed upgrade.
Status *KubernetesVersionStatus `json:"status"`
Status *KubernetesVersionStatus `json:"status,omitempty"`
}
// KubernetesDeploymentStatus defines the status for the Tenant Control Plane Deployment in the management cluster.
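Taken together, these changes replace `resourceVersion` bookkeeping with content checksums across the status subresource; per the `fix: ensuring idempotency when kine is enabled` commit, hashing the content keeps reconciliation idempotent even when the underlying objects are re-created. A hypothetical status excerpt after this change (field names from the types above, values illustrative):

```yaml
# Hypothetical TenantControlPlane status excerpt: a Secret is now tracked by a
# checksum of its content instead of its Kubernetes resourceVersion.
status:
  certificates:
    apiServer:
      secretName: tenant-00-api-server-certificate    # illustrative name
      checksum: 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08
      lastUpdate: "2022-08-23T08:48:56Z"
```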

View File

@@ -64,13 +64,12 @@ type ControlPlane struct {
// Defining the options for the Tenant Control Plane Service resource.
Service ServiceSpec `json:"service"`
// Defining the options for an Optional Ingress which will expose API Server of the Tenant Control Plane
Ingress IngressSpec `json:"ingress,omitempty"`
Ingress *IngressSpec `json:"ingress,omitempty"`
}
// IngressSpec defines the options for the ingress which will expose API Server of the Tenant Control Plane.
type IngressSpec struct {
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
Enabled bool `json:"enabled"`
IngressClassName string `json:"ingressClassName,omitempty"`
// Hostname is an optional field which will be used as Ingress's Host. If it is not defined,
// Ingress's host will be "<tenant>.<namespace>.<domain>", where domain is specified under NetworkProfileSpec

View File

@@ -203,7 +203,11 @@ func (in *ControlPlane) DeepCopyInto(out *ControlPlane) {
*out = *in
in.Deployment.DeepCopyInto(&out.Deployment)
in.Service.DeepCopyInto(&out.Service)
in.Ingress.DeepCopyInto(&out.Ingress)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(IngressSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlane.
@@ -390,19 +394,34 @@ func (in *IngressSpec) DeepCopy() *IngressSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KineMySQLStatus) DeepCopyInto(out *KineMySQLStatus) {
func (in *KineStatus) DeepCopyInto(out *KineStatus) {
*out = *in
out.Config = in.Config
in.Setup.DeepCopyInto(&out.Setup)
in.Certificate.DeepCopyInto(&out.Certificate)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KineMySQLStatus.
func (in *KineMySQLStatus) DeepCopy() *KineMySQLStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KineStatus.
func (in *KineStatus) DeepCopy() *KineStatus {
if in == nil {
return nil
}
out := new(KineMySQLStatus)
out := new(KineStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KonnectivityConfigMap) DeepCopyInto(out *KonnectivityConfigMap) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KonnectivityConfigMap.
func (in *KonnectivityConfigMap) DeepCopy() *KonnectivityConfigMap {
if in == nil {
return nil
}
out := new(KonnectivityConfigMap)
in.DeepCopyInto(out)
return out
}
@@ -430,6 +449,7 @@ func (in *KonnectivitySpec) DeepCopy() *KonnectivitySpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KonnectivityStatus) DeepCopyInto(out *KonnectivityStatus) {
*out = *in
out.ConfigMap = in.ConfigMap
in.Certificate.DeepCopyInto(&out.Certificate)
in.Kubeconfig.DeepCopyInto(&out.Kubeconfig)
in.ServiceAccount.DeepCopyInto(&out.ServiceAccount)
@@ -623,7 +643,11 @@ func (in *KubernetesStatus) DeepCopyInto(out *KubernetesStatus) {
in.Version.DeepCopyInto(&out.Version)
in.Deployment.DeepCopyInto(&out.Deployment)
in.Service.DeepCopyInto(&out.Service)
in.Ingress.DeepCopyInto(&out.Ingress)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(KubernetesIngressStatus)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesStatus.
@@ -768,9 +792,9 @@ func (in *StorageStatus) DeepCopyInto(out *StorageStatus) {
*out = new(ETCDStatus)
(*in).DeepCopyInto(*out)
}
if in.KineMySQL != nil {
in, out := &in.KineMySQL, &out.KineMySQL
*out = new(KineMySQLStatus)
if in.Kine != nil {
in, out := &in.Kine, &out.Kine
*out = new(KineStatus)
(*in).DeepCopyInto(*out)
}
}

Binary file not shown.

Before: 288 KiB · After: 189 KiB

File diff suppressed because one or more lines are too long.

Before: 93 KiB

Binary file not shown.

Before: 284 KiB · After: 184 KiB

File diff suppressed because one or more lines are too long.

Before: 93 KiB

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: kamaji
description: A Kubernetes distribution aimed to build and operate a Managed Kubernetes service with a fraction of operational burde.
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
# A chart can be either an 'application' or a 'library' chart.
#
@@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -23,8 +23,8 @@ version: 0.1.1
# It is recommended to use it with quotes.
appVersion: 0.1.0
home: https://github.com/clastix/kamaji-internal/tree/master/helm/kamaji
sources: ["https://github.com/clastix/kamaji-internal"]
home: https://github.com/clastix/kamaji
sources: ["https://github.com/clastix/kamaji"]
kubeVersion: ">=1.18"
maintainers:
- email: iam@mendrugory.com
@@ -33,3 +33,5 @@ maintainers:
name: Dario Tranchitella
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto

View File

@@ -2,26 +2,7 @@
![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square)
A Kubernetes distribution aimed to build and operate a Managed Kubernetes service with a fraction of operational burde.
**Homepage:** <https://github.com/clastix/kamaji-internal/tree/master/helm/kamaji>
### Pre-requisites
Kamaji requires a [multi-tenant etcd cluster](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
The installation and provisioning processes are already put in place by the Helm Chart starting from v0.1.1 in order to streamline the local test.
> For production use an externally managed etcd is highly recommended, the etcd addon offered by this chart is not considered production-grade.
If you'd like to use an externally managed etcd instance, you can specify the overrides and by setting the value `etcd.deploy=false`.
### Install Kamaji
To install the chart with the release name `kamaji`:
```console
helm upgrade --install --namespace kamaji-system --create-namespace kamaji .
```
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
## Maintainers
@@ -30,15 +11,57 @@ helm upgrade --install --namespace kamaji-system --create-namespace kamaji .
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
## Source Code
* <https://github.com/clastix/kamaji-internal>
* <https://github.com/clastix/kamaji>
## Requirements
Kubernetes: `>=1.18`
[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
This Helm Chart, starting from v0.1.1, provides the installation of an internal `etcd` in order to streamline local testing. If you'd like to use an externally managed `etcd` instance, you can specify the overrides by setting the value `etcd.deploy=false`.
> For production use, an externally managed `etcd` is highly recommended; the `etcd` addon offered by this Chart is not considered production-grade.
## Install Kamaji
To install the Chart with the release name `kamaji`:
helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji
Show the status:
helm status kamaji -n kamaji-system
Upgrade the Chart:
helm upgrade kamaji -n kamaji-system clastix/kamaji
Uninstall the Chart:
helm uninstall kamaji -n kamaji-system
## Customize the installation
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.
Specify your overrides file when you install the Chart:
helm upgrade kamaji --install --namespace kamaji-system --create-namespace clastix/kamaji --values myvalues.yaml
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file. Any values in `values.yaml` that weren't overridden will keep their defaults.
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
helm upgrade kamaji --install --namespace kamaji-system --create-namespace clastix/kamaji --set etcd.deploy=false
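As an illustration, a minimal `myvalues.yaml` for an externally managed `etcd` might look like this (keys taken from the Values table below; hostnames are the chart defaults):

```yaml
# myvalues.yaml — illustrative overrides for an externally managed etcd
etcd:
  deploy: false                  # skip the bundled etcd
  overrides:
    caSecret:
      name: etcd-certs
      namespace: kamaji-system
    clientSecret:
      name: root-client-certs
      namespace: kamaji-system
    endpoints:                   # map of member name -> endpoint, no port
      etcd-0: https://etcd-0.etcd.kamaji-system.svc.cluster.local
      etcd-1: https://etcd-1.etcd.kamaji-system.svc.cluster.local
      etcd-2: https://etcd-2.etcd.kamaji-system.svc.cluster.local
```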
Here are the values you can override:
## Values
| Key | Type | Default | Description |
@@ -46,12 +69,19 @@ Kubernetes: `>=1.18`
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
| configPath | string | `"./kamaji.yaml"` | Configuration file path alternative. (default "./kamaji.yaml") |
| etcd.compactionInterval | int | `0` | ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled)) |
| etcd.deploy | bool | `true` | Install an etcd 3.5 with enabled multi-tenancy along with Kamaji |
| etcd.deploy | bool | `true` | Install an etcd with enabled multi-tenancy along with Kamaji |
| etcd.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/coreos/etcd","tag":"v3.5.4"}` | Install specific etcd image |
| etcd.livenessProbe | object | `{"failureThreshold":8,"httpGet":{"path":"/health?serializable=true","port":2381,"scheme":"HTTP"},"initialDelaySeconds":10,"periodSeconds":10,"timeoutSeconds":15}` | The livenessProbe for the etcd container |
| etcd.overrides.caSecret.name | string | `"etcd-certs"` | Name of the secret which contains CA's certificate and private key. (default: "etcd-certs") |
| etcd.overrides.caSecret.namespace | string | `"kamaji-system"` | Namespace of the secret which contains CA's certificate and private key. (default: "kamaji-system") |
| etcd.overrides.clientSecret.name | string | `"root-client-certs"` | Name of the secret which contains ETCD client certificates. (default: "root-client-certs") |
| etcd.overrides.clientSecret.namespace | string | `"kamaji-system"` | Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system") |
| etcd.overrides.endpoints | string | `"https://etcd-0.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-1.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-2.etcd.kamaji-system.svc.cluster.local:2379"` | (string) Comma-separated list of the endpoints of the etcd cluster's members. |
| etcd.overrides.endpoints | object | `{"etcd-0":"https://etcd-0.etcd.kamaji-system.svc.cluster.local","etcd-1":"https://etcd-1.etcd.kamaji-system.svc.cluster.local","etcd-2":"https://etcd-2.etcd.kamaji-system.svc.cluster.local"}` | (map) Dictionary of the endpoints of the etcd cluster's members; the key is the name of the etcd member. Don't define any port: the client and peer ports are appended from `etcd.port` and `etcd.peerApiPort`. |
| etcd.peerApiPort | int | `2380` | The peer API port which servers are listening to. |
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| etcd.persistence.size | string | `"10Gi"` | |
| etcd.persistence.storageClass | string | `""` | |
| etcd.port | int | `2379` | The client request port. |
| etcd.serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
| etcd.serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
| extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |

View File

@@ -0,0 +1,56 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
This Helm Chart, starting from v0.1.1, provides the installation of an internal `etcd` in order to streamline local testing. If you'd like to use an externally managed `etcd` instance, you can specify the overrides by setting the value `etcd.deploy=false`.
> For production use, an externally managed `etcd` is highly recommended; the `etcd` addon offered by this Chart is not considered production-grade.
## Install Kamaji
To install the Chart with the release name `kamaji`:
helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji
Show the status:
helm status kamaji -n kamaji-system
Upgrade the Chart:
helm upgrade kamaji -n kamaji-system clastix/kamaji
Uninstall the Chart:
helm uninstall kamaji -n kamaji-system
## Customize the installation
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.
Specify your overrides file when you install the Chart:
helm upgrade kamaji --install --namespace kamaji-system --create-namespace clastix/kamaji --values myvalues.yaml
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file. Any values in `values.yaml` that weren't overridden will keep their defaults.
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
helm upgrade kamaji --install --namespace kamaji-system --create-namespace clastix/kamaji --set etcd.deploy=false
Here are the values you can override:
{{ template "chart.valuesSection" . }}

View File

@@ -282,8 +282,6 @@ spec:
type: string
type: object
type: object
enabled:
type: boolean
hostname:
description: Hostname is an optional field which will be used
as Ingress's Host. If it is not defined, Ingress's host
@@ -292,8 +290,6 @@ spec:
type: string
ingressClassName:
type: string
required:
- enabled
type: object
service:
description: Defining the options for the Tenant Control Plane
@@ -479,6 +475,8 @@ spec:
properties:
agent:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -487,23 +485,22 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
certificate:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
clusterrolebinding:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -512,12 +509,14 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: object
configMap:
properties:
checksum:
type: string
name:
type: string
type: object
egressSelectorConfiguration:
type: string
enabled:
type: boolean
kubeconfig:
@@ -534,6 +533,8 @@ spec:
type: object
sa:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -542,9 +543,6 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
service:
description: KubernetesServiceStatus defines the status for
@@ -738,33 +736,33 @@ spec:
apiServer:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
apiServerKubeletClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
ca:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
@@ -773,9 +771,11 @@ spec:
of ETCD Certificate for API server.
properties:
apiServer:
description: ETCDCertificateStatus defines the observed state
of ETCD Certificate for API server.
description: APIServerCertificatesStatus defines the observed
state of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -786,6 +786,8 @@ spec:
description: ETCDCertificateStatus defines the observed state
of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -796,28 +798,30 @@ spec:
frontProxyCA:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
frontProxyClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
sa:
description: PublicKeyPrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -1277,8 +1281,6 @@ spec:
description: Version is the running Kubernetes version of
the Tenant Control Plane.
type: string
required:
- status
type: object
type: object
storage:
@@ -1324,34 +1326,36 @@ spec:
- name
type: object
type: object
kineMySQL:
kine:
properties:
certificate:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
config:
properties:
resourceVersion:
checksum:
type: string
secretName:
type: string
type: object
driver:
type: string
setup:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
schema:
type: string
sqlConfigResourceVersion:
type: string
user:
type: string
type: object

View File

@@ -93,19 +93,41 @@ Namespace of the etcd root-client secret.
{{- end }}
{{/*
List the declared etcd endpoints, using the overrides in case of unmanaged etcd.
Comma separated list of etcd endpoints, using the overrides in case of unmanaged etcd.
*/}}
{{- define "etcd.endpoints" }}
{{- $list := list -}}
{{- if .Values.etcd.deploy }}
{{- range $count := until 3 -}}
{{- printf "https://%s-%d.%s.%s.svc.cluster.local:2379" "etcd" $count ( include "etcd.serviceName" . ) $.Release.Namespace -}}
{{- if lt $count ( sub 3 1 ) -}}
{{- printf "," -}}
{{- range $count := until 3 -}}
{{- $list = append $list (printf "https://%s-%d.%s.%s.svc.cluster.local:%d" "etcd" $count ( include "etcd.serviceName" . ) $.Release.Namespace (int $.Values.etcd.port) ) -}}
{{- end }}
{{- else if .Values.etcd.overrides.endpoints }}
{{- range $v := .Values.etcd.overrides.endpoints -}}
{{- $list = append $list (printf "%s:%d" $v (int $.Values.etcd.port) ) -}}
{{- end -}}
{{- else if not .Values.etcd.overrides.endpoints }}
{{- fail "A valid .Values.etcd.overrides.endpoints required!" }}
{{- end }}
{{- else }}
{{- required "A valid .Values.etcd.overrides.endpoints required!" .Values.etcd.overrides.endpoints }}
{{- join "," $list -}}
{{- end }}
{{/*
Key-value of the etcd peers, using the overrides in case of unmanaged etcd.
*/}}
{{- define "etcd.initialCluster" }}
{{- $list := list -}}
{{- if .Values.etcd.deploy }}
{{- range $i, $count := until 3 -}}
{{- $list = append $list ( printf "etcd-%d=https://%s-%d.%s.%s.svc.cluster.local:%d" $i "etcd" $count ( include "etcd.serviceName" . ) $.Release.Namespace (int $.Values.etcd.peerApiPort) ) -}}
{{- end }}
{{- else if .Values.etcd.overrides.endpoints }}
{{- range $k, $v := .Values.etcd.overrides.endpoints -}}
{{- $list = append $list ( printf "%s=%s:%d" $k $v (int $.Values.etcd.peerApiPort) ) -}}
{{- end -}}
{{- else if not .Values.etcd.overrides.endpoints }}
{{- fail "A valid .Values.etcd.overrides.endpoints required!" }}
{{- end }}
{{- join "," $list -}}
{{- end }}
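With the chart defaults (managed `etcd`, `port: 2379`, `peerApiPort: 2380`, release namespace `kamaji-system`, and assuming `etcd.serviceName` resolves to `etcd`), the two helpers should render roughly as follows (illustrative):

```yaml
# Approximate output of the endpoints/initialCluster helpers with default values:
endpoints: https://etcd-0.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-1.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-2.etcd.kamaji-system.svc.cluster.local:2379
initialCluster: etcd-0=https://etcd-0.etcd.kamaji-system.svc.cluster.local:2380,etcd-1=https://etcd-1.etcd.kamaji-system.svc.cluster.local:2380,etcd-2=https://etcd-2.etcd.kamaji-system.svc.cluster.local:2380
```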
{{/*

View File

@@ -7,7 +7,7 @@ metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
name: "{{ .Release.Name }}-etcd-teardown"
namespace: {{ .Release.Namespace }}
spec:

View File

@@ -7,7 +7,7 @@ metadata:
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
name: "{{ .Release.Name }}-etcd-setup"
namespace: {{ .Release.Namespace }}
spec:
@@ -43,7 +43,7 @@ spec:
kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem &&
kubectl --namespace={{ .Release.Namespace }} rollout status sts/etcd --timeout=120s
kubectl --namespace={{ .Release.Namespace }} rollout status sts/etcd --timeout=300s
volumeMounts:
- mountPath: /certs
name: certs

View File

@@ -9,9 +9,9 @@ metadata:
spec:
clusterIP: None
ports:
- port: 2379
- port: {{ .Values.etcd.port }}
name: client
- port: 2380
- port: {{ .Values.etcd.peerApiPort }}
name: peer
selector:
{{- include "etcd.selectorLabels" . | nindent 4 }}

View File

@@ -24,7 +24,8 @@ spec:
secretName: {{ include "etcd.caSecretName" . }}
containers:
- name: etcd
image: quay.io/coreos/etcd:v3.5.1
image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag | default "v3.5.4" }}
imagePullPolicy: {{ .Values.etcd.image.pullPolicy }}
ports:
- containerPort: 2379
name: client
@@ -40,22 +41,25 @@ spec:
- --data-dir=/var/run/etcd
- --name=$(POD_NAME)
- --initial-cluster-state=new
- --initial-cluster=etcd-0=https://etcd-0.etcd.$(POD_NAMESPACE).svc.cluster.local:2380,etcd-1=https://etcd-1.etcd.$(POD_NAMESPACE).svc.cluster.local:2380,etcd-2=https://etcd-2.etcd.$(POD_NAMESPACE).svc.cluster.local:2380
- --initial-cluster={{ include "etcd.initialCluster" . }}
- --initial-advertise-peer-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2380
- --advertise-client-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2379
- --initial-cluster-token=kamaji
- --listen-client-urls=https://0.0.0.0:2379
- --advertise-client-urls={{ include "etcd.endpoints" . }}
- --listen-metrics-urls=http://0.0.0.0:2381
- --listen-peer-urls=https://0.0.0.0:2380
- --client-cert-auth=true
- --peer-client-cert-auth=true
- --trusted-ca-file=/etc/etcd/pki/ca.crt
- --cert-file=/etc/etcd/pki/server.pem
- --key-file=/etc/etcd/pki/server-key.pem
- --listen-peer-urls=https://0.0.0.0:2380
- --peer-client-cert-auth=true
- --peer-trusted-ca-file=/etc/etcd/pki/ca.crt
- --peer-cert-file=/etc/etcd/pki/peer.pem
- --peer-key-file=/etc/etcd/pki/peer-key.pem
- --auto-compaction-mode=periodic
- --auto-compaction-retention=5m
- --snapshot-count=10000
- --quota-backend-bytes=8589934592
- --v=8
env:
- name: POD_NAME
@@ -66,32 +70,24 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.etcd.livenessProbe }}
livenessProbe:
failureThreshold: 8
httpGet:
host: 127.0.0.1
path: /health
port: 2381
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.etcd.startupProbe }}
startupProbe:
failureThreshold: 24
httpGet:
host: 127.0.0.1
path: /health
port: 2381
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
{{- toYaml . | nindent 12 }}
{{- end }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 8Gi
- metadata:
name: data
spec:
storageClassName: {{ .Values.etcd.persistence.storageClass }}
accessModes:
{{- range .Values.etcd.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.etcd.persistence.size }}
{{- end }}

View File

@@ -19,13 +19,43 @@ extraArgs: []
configPath: "./kamaji.yaml"
etcd:
# -- Install an etcd 3.5 with enabled multi-tenancy along with Kamaji
# -- Install an etcd with enabled multi-tenancy along with Kamaji
deploy: true
# -- The peer API port which servers are listening to.
peerApiPort: 2380
# -- The client request port.
port: 2379
# -- Install specific etcd image
image:
repository: quay.io/coreos/etcd
tag: "v3.5.4"
pullPolicy: IfNotPresent
# -- The livenessProbe for the etcd container
livenessProbe:
failureThreshold: 8
httpGet:
path: /health?serializable=true
port: 2381
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 15
serviceAccount:
# -- Create a ServiceAccount, required to install and provision the etcd backing storage (default: true)
create: true
# -- Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "")
name: ""
persistence:
size: 10Gi
storageClass: ""
accessModes:
- ReadWriteOnce
overrides:
caSecret:
# -- Name of the secret which contains CA's certificate and private key. (default: "etcd-certs")
@@ -37,8 +67,11 @@ etcd:
name: root-client-certs
# -- Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system")
namespace: kamaji-system
# -- (string) Comma-separated list of the endpoints of the etcd cluster's members.
endpoints: "https://etcd-0.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-1.etcd.kamaji-system.svc.cluster.local:2379,https://etcd-2.etcd.kamaji-system.svc.cluster.local:2379"
# -- (map) Dictionary of the endpoints of the etcd cluster's members; the key is the name of the etcd member. Don't define any port: the client and peer ports are appended from the `etcd.port` and `etcd.peerApiPort` values.
endpoints:
etcd-0: https://etcd-0.etcd.kamaji-system.svc.cluster.local
etcd-1: https://etcd-1.etcd.kamaji-system.svc.cluster.local
etcd-2: https://etcd-2.etcd.kamaji-system.svc.cluster.local
# -- ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled))
compactionInterval: 0

View File

@@ -282,8 +282,6 @@ spec:
type: string
type: object
type: object
enabled:
type: boolean
hostname:
description: Hostname is an optional field which will be used
as Ingress's Host. If it is not defined, Ingress's host
@@ -292,8 +290,6 @@ spec:
type: string
ingressClassName:
type: string
required:
- enabled
type: object
service:
description: Defining the options for the Tenant Control Plane
@@ -479,6 +475,8 @@ spec:
properties:
agent:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -487,23 +485,22 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
certificate:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
clusterrolebinding:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -512,12 +509,14 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: object
configMap:
properties:
checksum:
type: string
name:
type: string
type: object
egressSelectorConfiguration:
type: string
enabled:
type: boolean
kubeconfig:
@@ -534,6 +533,8 @@ spec:
type: object
sa:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -542,9 +543,6 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
service:
description: KubernetesServiceStatus defines the status for
@@ -738,33 +736,33 @@ spec:
apiServer:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
apiServerKubeletClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
ca:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
@@ -773,9 +771,11 @@ spec:
of ETCD Certificate for API server.
properties:
apiServer:
description: ETCDCertificateStatus defines the observed state
of ETCD Certificate for API server.
description: APIServerCertificatesStatus defines the observed
state of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -786,6 +786,8 @@ spec:
description: ETCDCertificateStatus defines the observed state
of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -796,28 +798,30 @@ spec:
frontProxyCA:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
frontProxyClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
sa:
description: PublicKeyPrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -1277,8 +1281,6 @@ spec:
description: Version is the running Kubernetes version of
the Tenant Control Plane.
type: string
required:
- status
type: object
type: object
storage:
@@ -1324,34 +1326,36 @@ spec:
- name
type: object
type: object
kineMySQL:
kine:
properties:
certificate:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
config:
properties:
resourceVersion:
checksum:
type: string
secretName:
type: string
type: object
driver:
type: string
setup:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
schema:
type: string
sqlConfigResourceVersion:
type: string
user:
type: string
type: object

View File

@@ -243,15 +243,11 @@ spec:
type: string
type: object
type: object
enabled:
type: boolean
hostname:
description: Hostname is an optional field which will be used as Ingress's Host. If it is not defined, Ingress's host will be "<tenant>.<namespace>.<domain>", where domain is specified under NetworkProfileSpec
type: string
ingressClassName:
type: string
required:
- enabled
type: object
service:
description: Defining the options for the Tenant Control Plane Service resource.
@@ -423,6 +419,8 @@ spec:
properties:
agent:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -431,23 +429,22 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
certificate:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
clusterrolebinding:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -456,12 +453,14 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: object
configMap:
properties:
checksum:
type: string
name:
type: string
type: object
egressSelectorConfiguration:
type: string
enabled:
type: boolean
kubeconfig:
@@ -477,6 +476,8 @@ spec:
type: object
sa:
properties:
checksum:
type: string
lastUpdate:
description: Last time when k8s object was updated
format: date-time
@@ -485,9 +486,6 @@ spec:
type: string
namespace:
type: string
resourceVersion:
description: Resource version of k8s object
type: string
type: object
service:
description: KubernetesServiceStatus defines the status for the Tenant Control Plane Service in the management cluster.
@@ -617,33 +615,33 @@ spec:
apiServer:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
apiServerKubeletClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
ca:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
@@ -651,8 +649,10 @@ spec:
description: ETCDCertificatesStatus defines the observed state of ETCD Certificate for API server.
properties:
apiServer:
description: ETCDCertificateStatus defines the observed state of ETCD Certificate for API server.
description: APIServerCertificatesStatus defines the observed state of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -662,6 +662,8 @@ spec:
ca:
description: ETCDCertificateStatus defines the observed state of ETCD Certificate for API server.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -672,28 +674,30 @@ spec:
frontProxyCA:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
frontProxyClient:
description: CertificatePrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
sa:
description: PublicKeyPrivateKeyPairStatus defines the status.
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
@@ -1039,8 +1043,6 @@ spec:
version:
description: Version is the running Kubernetes version of the Tenant Control Plane.
type: string
required:
- status
type: object
type: object
storage:
@@ -1085,34 +1087,36 @@ spec:
- name
type: object
type: object
kineMySQL:
kine:
properties:
certificate:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
resourceVersion:
type: string
secretName:
type: string
type: object
config:
properties:
resourceVersion:
checksum:
type: string
secretName:
type: string
type: object
driver:
type: string
setup:
properties:
checksum:
type: string
lastUpdate:
format: date-time
type: string
schema:
type: string
sqlConfigResourceVersion:
type: string
user:
type: string
type: object

View File

@@ -5,48 +5,18 @@ metadata:
spec:
controlPlane:
deployment:
replicas: 2
additionalMetadata:
annotations:
environment.clastix.io: test
tier.clastix.io: "0"
labels:
tenant.clastix.io: test
kind.clastix.io: deployment
replicas: 1
service:
additionalMetadata:
annotations:
environment.clastix.io: test
tier.clastix.io: "0"
labels:
tenant.clastix.io: test
kind.clastix.io: service
serviceType: LoadBalancer
ingress:
enabled: true
hostname: kamaji.local
ingressClassName: nginx
additionalMetadata:
annotations:
kubernetes.io/ingress.allow-http: "false"
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
kubernetes:
version: "v1.23.1"
kubelet:
cgroupfs: systemd
cgroupfs: cgroupfs
admissionControllers:
- ResourceQuota
- LimitRanger
networkProfile:
address: "127.0.0.1"
port: 6443
certSANs:
- "test.clastix.labs"
serviceCidr: "10.96.0.0/16"
podCidr: "10.244.0.0/16"
dnsServiceIPs:
- "10.96.0.10"
addons:
coreDNS: {}
kubeProxy: {}

View File

@@ -81,7 +81,7 @@ func getDefaultDeleteableResources(config GroupDeleteableResourceBuilderConfigur
Endpoints: getArrayFromString(config.tcpReconcilerConfig.ETCDEndpoints),
},
}
case types.KineMySQL:
case types.KineMySQL, types.KinePostgreSQL:
return []resources.DeleteableResource{
&resources.SQLSetup{
Client: config.client,
@@ -215,25 +215,27 @@ func getKubernetesStorageResources(c client.Client, log logr.Logger, tcpReconcil
Endpoints: getArrayFromString(tcpReconcilerConfig.ETCDEndpoints),
},
}
case types.KineMySQL:
case types.KineMySQL, types.KinePostgreSQL:
return []resources.Resource{
&resources.SQLStorageConfig{
Client: c,
Name: "sql-config",
Host: dbConnection.GetHost(),
Port: dbConnection.GetPort(),
Driver: dbConnection.Driver(),
},
&resources.SQLSetup{
Client: c,
Name: "sql-setup",
DBConnection: dbConnection,
Driver: dbConnection.Driver(),
},
&resources.SQLCertificate{
Client: c,
Name: "sql-certificate",
StorageType: tcpReconcilerConfig.ETCDStorageType,
SQLConfigSecretName: tcpReconcilerConfig.KineMySQLSecretName,
SQLConfigSecretNamespace: tcpReconcilerConfig.KineMySQLSecretNamespace,
SQLConfigSecretName: tcpReconcilerConfig.KineSecretName,
SQLConfigSecretNamespace: tcpReconcilerConfig.KineSecretNamespace,
},
}
default:

View File

@@ -8,7 +8,9 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"strings"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
@@ -17,41 +19,67 @@ import (
)
func (r *TenantControlPlaneReconciler) getStorageConnection(ctx context.Context) (sql.DBConnection, error) {
var driver sql.Driver
var dbName string
// TODO: https://github.com/clastix/kamaji/issues/67
switch r.Config.ETCDStorageType {
case types.ETCD:
return nil, nil
case types.KineMySQL:
secret := &corev1.Secret{}
namespacedName := k8stypes.NamespacedName{Namespace: r.Config.KineMySQLSecretNamespace, Name: r.Config.KineMySQLSecretName}
if err := r.Client.Get(ctx, namespacedName, secret); err != nil {
return nil, err
}
rootCAs := x509.NewCertPool()
if ok := rootCAs.AppendCertsFromPEM(secret.Data["ca.crt"]); !ok {
return nil, fmt.Errorf("error creating root ca for mysql db connector")
}
certificate, err := tls.X509KeyPair(secret.Data["server.crt"], secret.Data["server.key"])
if err != nil {
return nil, err
}
return sql.GetDBConnection(
sql.ConnectionConfig{
SQLDriver: sql.MySQL,
User: "root",
Password: string(secret.Data["MYSQL_ROOT_PASSWORD"]),
Host: r.Config.KineMySQLHost,
Port: r.Config.KineMySQLPort,
DBName: "mysql",
TLSConfig: &tls.Config{
ServerName: r.Config.KineMySQLHost,
RootCAs: rootCAs,
Certificates: []tls.Certificate{certificate},
},
},
)
driver = sql.MySQL
dbName = "mysql"
case types.KinePostgreSQL:
driver = sql.PostgreSQL
default:
return nil, nil
}
secret := &corev1.Secret{}
namespacedName := k8stypes.NamespacedName{Namespace: r.Config.KineSecretNamespace, Name: r.Config.KineSecretName}
if err := r.Client.Get(ctx, namespacedName, secret); err != nil {
return nil, err
}
if t := "kamaji.clastix.io/kine"; string(secret.Type) != t {
return nil, fmt.Errorf("expecting a secret of type %s", t)
}
keys := []string{"ca.crt", "server.crt", "server.key", "username", "password"}
if secret.Data == nil {
return nil, fmt.Errorf("the Kine secret %s/%s is missing all the required keys (%s)", secret.GetNamespace(), secret.GetName(), strings.Join(keys, ","))
}
for _, key := range keys {
if _, ok := secret.Data[key]; !ok {
return nil, fmt.Errorf("missing required key %s for the Kine secret %s/%s", key, secret.GetNamespace(), secret.GetName())
}
}
rootCAs := x509.NewCertPool()
if ok := rootCAs.AppendCertsFromPEM(secret.Data["ca.crt"]); !ok {
return nil, fmt.Errorf("error create root CA for the DB connector")
}
certificate, err := tls.X509KeyPair(secret.Data["server.crt"], secret.Data["server.key"])
if err != nil {
return nil, errors.Wrap(err, "cannot retrieve x.509 key pair from the Kine Secret")
}
return sql.GetDBConnection(
sql.ConnectionConfig{
SQLDriver: driver,
User: string(secret.Data["username"]),
Password: string(secret.Data["password"]),
Host: r.Config.KineHost,
Port: r.Config.KinePort,
DBName: dbName,
TLSConfig: &tls.Config{
ServerName: r.Config.KineHost,
RootCAs: rootCAs,
Certificates: []tls.Certificate{certificate},
},
},
)
}

View File

@@ -47,10 +47,10 @@ type TenantControlPlaneReconcilerConfig struct {
ETCDCompactionInterval string
TmpBaseDirectory string
DBConnection sql.DBConnection
KineMySQLSecretName string
KineMySQLSecretNamespace string
KineMySQLHost string
KineMySQLPort int
KineSecretName string
KineSecretNamespace string
KineHost string
KinePort int
KineContainerImage string
}

View File

@@ -14,8 +14,13 @@ make -C kind
make -C etcd
```
## Multi-tenant MySQL-MariaDB cluster
## Multi-tenant cluster using Kine
`kine` is an `etcd` shim that allows using a different datastore.
Kamaji currently supports the following backends:
- [MySQL](kine/mysql/README.md)
- [PostgreSQL](kine/postgresql/README.md)
> This assumes you already have a running Kubernetes cluster and kubeconfig.
Read [this](./mysql/README.md) in order to know more about.

View File

@@ -0,0 +1,120 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: etcd
namespace:
---
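# Client-facing Service: a single ClusterIP exposing the etcd client port (2379).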
apiVersion: v1
kind: Service
metadata:
name: etcd-server
namespace:
spec:
type: ClusterIP
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
selector:
app: etcd
---
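# Headless Service: gives every etcd member a stable per-pod DNS name, used for peer discovery.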
apiVersion: v1
kind: Service
metadata:
name: etcd
namespace:
spec:
clusterIP: None
ports:
- port: 2379
name: client
- port: 2380
name: peer
selector:
app: etcd
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: etcd
labels:
app: etcd
namespace:
spec:
serviceName: etcd
selector:
matchLabels:
app: etcd
replicas: 3
template:
metadata:
name: etcd
labels:
app: etcd
spec:
serviceAccountName: etcd
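# Spread the three members across availability zones, so quorum survives a single zone failure (assuming at least three zones).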
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: etcd
volumes:
- name: certs
secret:
secretName: etcd-certs
containers:
- name: etcd
image: quay.io/coreos/etcd:v3.5.1
ports:
- containerPort: 2379
name: client
- containerPort: 2380
name: peer
volumeMounts:
- name: data
mountPath: /var/run/etcd
- name: certs
mountPath: /etc/etcd/pki
command:
- etcd
- --data-dir=/var/run/etcd
- --name=$(POD_NAME)
- --initial-cluster-state=new
- --initial-cluster=etcd-0=https://etcd-0.etcd.$(POD_NAMESPACE).svc.cluster.local:2380,etcd-1=https://etcd-1.etcd.$(POD_NAMESPACE).svc.cluster.local:2380,etcd-2=https://etcd-2.etcd.$(POD_NAMESPACE).svc.cluster.local:2380
- --initial-advertise-peer-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2380
- --initial-cluster-token=kamaji
- --listen-client-urls=https://0.0.0.0:2379
- --advertise-client-urls=https://etcd-0.etcd.$(POD_NAMESPACE).svc.cluster.local:2379,https://etcd-1.etcd.$(POD_NAMESPACE).svc.cluster.local:2379,https://etcd-2.etcd.$(POD_NAMESPACE).svc.cluster.local:2379,https://etcd-server.$(POD_NAMESPACE).svc.cluster.local:2379
- --client-cert-auth=true
- --trusted-ca-file=/etc/etcd/pki/ca.crt
- --cert-file=/etc/etcd/pki/server.pem
- --key-file=/etc/etcd/pki/server-key.pem
- --listen-peer-urls=https://0.0.0.0:2380
- --peer-client-cert-auth=true
- --peer-trusted-ca-file=/etc/etcd/pki/ca.crt
- --peer-cert-file=/etc/etcd/pki/peer.pem
- --peer-key-file=/etc/etcd/pki/peer-key.pem
- --snapshot-count=8000
- --auto-compaction-mode=periodic
- --auto-compaction-retention=5m
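# 8589934592 bytes = 8 GiB, the maximum recommended etcd backend quota.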
- --quota-backend-bytes=8589934592
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 8Gi

View File

@@ -1,5 +1,30 @@
# azure parameters
export KAMAJI_REGION=westeurope
export KAMAJI_RG=Kamaji
# https://docs.microsoft.com/en-us/azure/aks/faq#why-are-two-resource-groups-created-with-aks
export KAMAJI_CLUSTER=kamaji
export KAMAJI_NODE_RG=MC_${KAMAJI_RG}_${KAMAJI_CLUSTER}_${KAMAJI_REGION}
# kamaji parameters
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
export TENANT_VERSION=v1.23.5
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16
export TENANT_SVC_CIDR=10.96.0.0/16
export TENANT_DNS_SERVICE=10.96.0.10
export TENANT_VM_SIZE=Standard_D2ds_v4
export TENANT_VM_IMAGE=UbuntuLTS
export TENANT_RG=$TENANT_NAME
export TENANT_NSG=$TENANT_NAME-nsg
export TENANT_VNET_NAME=$TENANT_NAME
export TENANT_VNET_ADDRESS=172.12.0.0/16
export TENANT_SUBNET_NAME=$TENANT_NAME-subnet
export TENANT_SUBNET_ADDRESS=172.12.10.0/24
export TENANT_VMSS=$TENANT_NAME-vmss

View File

@@ -1,5 +0,0 @@
# etcd machine addresses
export ETCD0=192.168.32.10
export ETCD1=192.168.32.11
export ETCD2=192.168.32.12
export ETCDHOSTS=($ETCD0 $ETCD1 $ETCD2)

View File

@@ -1,5 +0,0 @@
# etcd endpoints
export ETCD_NAMESPACE=etcd-system
export ETCD0=etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local
export ETCD1=etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local
export ETCD2=etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local

View File

@@ -1,22 +0,0 @@
export KAMAJI_REGION=westeurope
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
export TENANT_VERSION=v1.23.4
export TENANT_ADDR=10.240.0.100 # IP used to expose the tenant control plane
export TENANT_PORT=6443 # PORT used to expose the tenant control plane
export TENANT_POD_CIDR=10.36.0.0/16
export TENANT_SVC_CIDR=10.96.0.0/16
export TENANT_DNS_SERVICE=10.96.0.10
export TENANT_VM_SIZE=Standard_D2ds_v4
export TENANT_VM_IMAGE=UbuntuLTS
export TENANT_RG=$TENANT_NAME
export TENANT_NSG=$TENANT_NAME-nsg
export TENANT_VNET_NAME=$TENANT_NAME
export TENANT_VNET_ADDRESS=192.168.0.0/16
export TENANT_SUBNET_NAME=$TENANT_NAME-subnet
export TENANT_SUBNET_ADDRESS=192.168.10.0/24
export TENANT_VMSS=$TENANT_NAME-vmss

View File

@@ -1,17 +0,0 @@
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=clastix.labs
export TENANT_VERSION=v1.23.1
export TENANT_ADDR=192.168.32.150 # IP used to expose the tenant control plane
export TENANT_PORT=6443 # PORT used to expose the tenant control plane
export TENANT_POD_CIDR=10.36.0.0/16
export TENANT_SVC_CIDR=10.96.0.0/16
export TENANT_DNS_SERVICE=10.96.0.10
# tenant node addresses
export WORKER0=192.168.32.90
export WORKER1=192.168.32.91
export WORKER2=192.168.32.92
export WORKER3=192.168.32.93

deploy/kamaji.env Normal file
View File

@@ -0,0 +1,18 @@
# kamaji parameters
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=clastix.labs
export TENANT_VERSION=v1.23.5
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16
export TENANT_SVC_CIDR=10.96.0.0/16
export TENANT_DNS_SERVICE=10.96.0.10
# tenant node addresses
export WORKER0=172.12.0.10
export WORKER1=172.12.0.11
export WORKER2=172.12.0.12

View File

@@ -2,7 +2,7 @@ kind_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
include ../etcd/Makefile
.PHONY: kind ingress-nginx kamaji-kind-worker-build
.PHONY: kind ingress-nginx
.DEFAULT_GOAL := kamaji
@@ -29,14 +29,5 @@ ingress-nginx: ingress-nginx-install
ingress-nginx-install:
kubectl apply -f $(kind_path)/nginx-deploy.yaml
kamaji-kind-worker-build:
docker build -f $(kind_path)/kamaji-kind-worker.dockerfile -t clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest} .
kamaji-kind-worker-push: kamaji-kind-worker-build
docker push clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest}
kamaji-kind-worker-join:
$(kind_path)/join-node.bash
kamaji-kind-worker-join-through-konnectivity:
$(kind_path)/join-node-konnectivity.bash

View File

@@ -1,33 +0,0 @@
{
"cniVersion": "0.3.1",
"name": "kindnet",
"plugins": [
{
"type": "ptp",
"ipMasq": false,
"ipam": {
"type": "host-local",
"dataDir": "/run/cni-ipam-state",
"routes": [
{
"dst": "0.0.0.0/0"
}
],
"ranges": [
[
{
"subnet": "10.244.0.0/24"
}
]
]
},
"mtu": 1500
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}

View File

@@ -1,35 +0,0 @@
#!/bin/bash
set -e
# Constants
export DOCKER_IMAGE_NAME="clastix/kamaji-kind-worker"
# Variables
export KUBERNETES_VERSION=${1:-latest}
export KUBECONFIG="${KUBECONFIG:-/tmp/kubeconfig}"
if [ -z $2 ]
then
MAPPING_PORT=""
else
MAPPING_PORT="-p ${2}:80"
fi
export KONNECTIVITY_PROXY_HOST=${3:-konnectiviy.local}
clear
echo "Welcome to join a new node through Konnectivity"
echo -ne "\nChecking right kubeconfig\n"
kubectl cluster-info
echo "Are you pointing to the right tenant control plane? (Type return to continue)"
read
JOIN_CMD="$(kubeadm --kubeconfig=${KUBECONFIG} token create --print-join-command) --ignore-preflight-errors=SystemVerification"
echo "Deploying new node..."
KIND_IP=$(docker inspect kamaji-control-plane --format='{{.NetworkSettings.Networks.kind.IPAddress}}')
NODE=$(docker run -d --add-host $KONNECTIVITY_PROXY_HOST:$KIND_IP --privileged -v /lib/modules:/lib/modules:ro -v /var --net host $MAPPING_PORT $DOCKER_IMAGE_NAME:$KUBERNETES_VERSION)
sleep 10
echo "Joining new node..."
docker exec -e JOIN_CMD="$JOIN_CMD" $NODE /bin/bash -c "$JOIN_CMD"

View File

@@ -3,11 +3,11 @@
set -e
# Constants
export DOCKER_IMAGE_NAME="clastix/kamaji-kind-worker"
export DOCKER_IMAGE_NAME="kindest/node"
export DOCKER_NETWORK="kind"
# Variables
export KUBERNETES_VERSION=${1:-latest}
export KUBERNETES_VERSION=${1:-v1.23.5}
export KUBECONFIG="${KUBECONFIG:-/tmp/kubeconfig}"
if [ -z $2 ]
@@ -31,3 +31,6 @@ NODE=$(docker run -d --privileged -v /lib/modules:/lib/modules:ro -v /var --net
sleep 10
echo "Joining new node..."
docker exec -e JOIN_CMD="$JOIN_CMD" $NODE /bin/bash -c "$JOIN_CMD"
echo "Node has joined! Remember to install the kind-net CNI by issuing the following command:"
echo " $: kubectl apply -f https://raw.githubusercontent.com/aojea/kindnet/master/install-kindnet.yaml"

View File

@@ -1,4 +0,0 @@
ARG KUBERNETES_VERSION=v1.23.4
FROM kindest/node:$KUBERNETES_VERSION
COPY ./cni-kindnet-config.json /etc/cni/net.d/10-kindnet.conflist

deploy/kine/README.md Normal file
View File

@@ -0,0 +1,47 @@
# Kine integration
[kine](https://github.com/k3s-io/kine) is an `etcd` shim that allows you to use a different datastore for your Kubernetes cluster.
Kamaji can run a shared datastore, using a different MySQL or PostgreSQL schema per Tenant.
This can help overcome the `etcd` limitations regarding scalability and cluster size, and also helps with HA and replication.
## Kamaji additional CLI flags
Once a compatible database is running, we need to provide information about it to Kamaji by using the following flags:
```
--etcd-storage-type={kine-mysql,kine-postgresql}
--kine-host=<database host>
--kine-port=<database port>
--kine-secret-name=<secret name>
--kine-secret-namespace=<secret namespace>
```
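As a sketch only, these flags might be wired into the Kamaji controller Deployment as follows; the host assumes the cloudnative-pg example from this repository, where a cluster named `postgresql` exposes a read-write Service called `postgresql-rw`:
```yaml
# Hypothetical excerpt of the Kamaji controller container spec.
args:
  - --etcd-storage-type=kine-postgresql
  - --kine-host=postgresql-rw.kamaji-system.svc.cluster.local
  - --kine-port=5432
  - --kine-secret-name=kine-secret
  - --kine-secret-namespace=kamaji-system
```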
## Kine Secret
The Kine Secret must be configured as follows:
```yaml
apiVersion: v1
data:
ca.crt: "content of the Certificate Authority for SSL connection"
password: "password of the super user"
server.crt: "content of the certificate for SSL connection"
server.key: "content of the private key for SSL connection"
username: "username of the super user"
kind: Secret
metadata:
name: kine-secret
namespace: kamaji-system
type: kamaji.clastix.io/kine
```
> Please, pay attention to the type `kamaji.clastix.io/kine`: this check is enforced at the code level to ensure the required data is provided.
> Actually, the `kine` integration expects a secured connection to the database since the sensitivity data of the Tenant.
## Drivers
Further details on the setup for each driver are available here:
- [MySQL/MariaDB](./mysql/README.md)
- [PostgreSQL](./postgresql/README.md)

View File

@@ -0,0 +1,40 @@
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
mariadb: mariadb-certificates mariadb-secret mariadb-kine-secret mariadb-deployment
mariadb-certificates:
rm -rf $(ROOT_DIR)/certs && mkdir $(ROOT_DIR)/certs
cfssl gencert -initca $(ROOT_DIR)/ca-csr.json | cfssljson -bare $(ROOT_DIR)/certs/ca
@mv $(ROOT_DIR)/certs/ca.pem $(ROOT_DIR)/certs/ca.crt
@mv $(ROOT_DIR)/certs/ca-key.pem $(ROOT_DIR)/certs/ca.key
cfssl gencert -ca=$(ROOT_DIR)/certs/ca.crt -ca-key=$(ROOT_DIR)/certs/ca.key \
-config=$(ROOT_DIR)/config.json -profile=server \
$(ROOT_DIR)/server-csr.json | cfssljson -bare $(ROOT_DIR)/certs/server
@mv $(ROOT_DIR)/certs/server.pem $(ROOT_DIR)/certs/server.crt
@mv $(ROOT_DIR)/certs/server-key.pem $(ROOT_DIR)/certs/server.key
chmod 644 $(ROOT_DIR)/certs/*
mariadb-secret:
@kubectl -n kamaji-system create secret generic mysql-config \
--from-file=$(ROOT_DIR)/certs/ca.crt --from-file=$(ROOT_DIR)/certs/ca.key \
--from-file=$(ROOT_DIR)/certs/server.key --from-file=$(ROOT_DIR)/certs/server.crt \
--from-file=$(ROOT_DIR)/mysql-ssl.cnf \
--from-literal=MYSQL_ROOT_PASSWORD=root \
--dry-run=client -o yaml | kubectl apply -f -
mariadb-kine-secret:
@\
CA=$$(cat $(ROOT_DIR)/certs/ca.crt | base64 | tr -d '\n') \
CRT=$$(cat $(ROOT_DIR)/certs/server.crt | base64 | tr -d '\n') \
KEY=$$(cat $(ROOT_DIR)/certs/server.key | base64 | tr -d '\n') \
ROOT_USERNAME=$$(echo -n root | base64) \
ROOT_PASSWORD=$$(kubectl -n kamaji-system get secret mysql-config -o jsonpath='{.data.MYSQL_ROOT_PASSWORD}') \
envsubst < $(ROOT_DIR)/../secret.yaml | kubectl -n kamaji-system apply -f -
mariadb-deployment:
@kubectl -n kamaji-system apply -f $(ROOT_DIR)/mariadb.yaml
mariadb-destroy:
@kubectl delete -n kamaji-system -f $(ROOT_DIR)/mariadb.yaml --ignore-not-found
@kubectl delete -n kamaji-system secret mysql-config --ignore-not-found
@kubectl delete -n kamaji-system secret kine-secret --ignore-not-found

View File

@@ -0,0 +1,82 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine). One of the implementations is [MySQL](https://www.mysql.com/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, MySQL (or [MariaDB](https://mariadb.org/) in this case) will be deployed into the local Kubernetes cluster to be used as storage for the tenants.
There is a Makefile to help with the process:
# Setup
Setup of the MySQL/MariaDB backend can be performed with a single command.
```bash
$ make mariadb
```
This action performs all the necessary steps to have MariaDB as the Kubernetes storage backend using kine.
```shell
rm -rf /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs && mkdir /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs
cfssl gencert -initca /home/prometherion/Documents/clastix/kamaji/deploy/mysql/ca-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca
2022/08/18 23:52:56 [INFO] generating a new CA key and certificate from CSR
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 310428005543054656774215122317606431230766314770
cfssl gencert -ca=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.crt -ca-key=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.key \
-config=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/config.json -profile=server \
/home/prometherion/Documents/clastix/kamaji/deploy/mysql/server-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/server
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 582698914718104852311252458344736030793138969927
chmod 644 /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/*
secret/mysql-config created
secret/kine-secret created
serviceaccount/mariadb created
service/mariadb created
deployment.apps/mariadb created
persistentvolumeclaim/pvc-mariadb created
```
## Certificate creation
```bash
$ make mariadb-certificates
```
Communication between kine and the backend is encrypted, therefore, a CA and a certificate from it must be created.
## Secret Deployment
```bash
$ make mariadb-secret
```
The previously generated certificates and the MySQL configuration must be made available before they can be used.
They are stored in the secret `kamaji-system:mysql-config`, used by the MySQL/MariaDB instance.
## Kine Secret
```bash
$ make mariadb-kine-secret
```
Assembles the required Kine data, such as the username, password, CA, certificate, and private key, into the Secret format expected by Kamaji.
## Deployment
```bash
$ make mariadb-deployment
```
Finally, starts the MySQL/MariaDB installation with all the required settings, such as the SSL connection configuration.
# Cleanup
```bash
$ make mariadb-destroy
```

View File

@@ -0,0 +1,36 @@
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
postgresql: cnpg-setup cnpg-deploy postgresql-secret postgresql-kine-secret
cnpg-setup:
@kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.16.0.yaml
cnpg-deploy:
@kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager
@kubectl -n kamaji-system apply -f $(ROOT_DIR)/postgresql.yaml
@while ! kubectl -n kamaji-system get secret postgresql-superuser > /dev/null 2>&1; do sleep 1; done
CNPG = $(shell git rev-parse --show-toplevel)/bin/kubectl-cnpg
cnpg:
@test -f $(shell git rev-parse --show-toplevel)/bin/kubectl-cnpg || curl -sSfL \
https://github.com/cloudnative-pg/cloudnative-pg/raw/main/hack/install-cnpg-plugin.sh | \
sh -s -- -b $(shell git rev-parse --show-toplevel)/bin
postgresql-secret: cnpg
@kubectl -n kamaji-system get secret postgres-root-cert > /dev/null 2>&1 || $(CNPG) certificate postgres-root-cert \
--cnpg-cluster postgresql \
--cnpg-user $$(kubectl -n kamaji-system get secret postgresql-superuser -o jsonpath='{.data.username}' | base64 -d)
postgresql-kine-secret:
@\
CA=$$(kubectl -n kamaji-system get secret postgresql-ca -o jsonpath='{.data.ca\.crt}') \
CRT=$$(kubectl -n kamaji-system get secret postgres-root-cert -o jsonpath='{.data.tls\.crt}') \
KEY=$$(kubectl -n kamaji-system get secret postgres-root-cert -o jsonpath='{.data.tls\.key}') \
ROOT_USERNAME=$$(kubectl -n kamaji-system get secret postgresql-superuser -o jsonpath='{.data.username}' ) \
ROOT_PASSWORD=$$(kubectl -n kamaji-system get secret postgresql-superuser -o jsonpath='{.data.password}' ) \
envsubst < $(ROOT_DIR)/../secret.yaml | kubectl -n kamaji-system apply -f -
postgresql-destroy:
@kubectl delete -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.16.0.yaml --ignore-not-found && \
kubectl delete -n kamaji-system secret postgres-root-cert --ignore-not-found && \
kubectl delete -n kamaji-system secret kine-secret --ignore-not-found

View File

@@ -0,0 +1,76 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine). One of the implementations is [PostgreSQL](https://www.postgresql.org/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, a PostgreSQL instance must be deployed in advance into the local Kubernetes cluster to be used as storage for the tenants.
For the sake of simplicity, the [cloudnative-pg](https://cloudnative-pg.io/) Operator is used to ease its setup.
There is a Makefile to help with the process:
## Setup
```bash
$ make postgresql
```
This target installs `cloudnative-pg`, creates the PostgreSQL instance in the Kamaji Namespace, and generates the Secret resource required for the kine integration.
This action is idempotent and doesn't overwrite values if they already exist.
```shell
namespace/cnpg-system unchanged
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/clusters.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/poolers.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/scheduledbackups.postgresql.cnpg.io configured
serviceaccount/cnpg-manager unchanged
clusterrole.rbac.authorization.k8s.io/cnpg-manager configured
clusterrolebinding.rbac.authorization.k8s.io/cnpg-manager-rolebinding unchanged
configmap/cnpg-default-monitoring unchanged
service/cnpg-webhook-service unchanged
deployment.apps/cnpg-controller-manager unchanged
mutatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-mutating-webhook-configuration configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-validating-webhook-configuration configured
deployment "cnpg-controller-manager" successfully rolled out
cluster.postgresql.cnpg.io/postgresql unchanged
secret/postgres-root-cert created
secret/kine-secret created
```
## Operator setup
```bash
$ make cnpg-setup
```
This target applies all the required manifests: the `cloudnative-pg` CRDs, the required RBAC, and the Deployment.
Release [v1.16.0](https://github.com/cloudnative-pg/cloudnative-pg/releases/tag/v1.16.0) has been tested successfully.
## SSL certificate Secret generation
```bash
$ make postgresql-secret
```
This target downloads the `kubectl-cnpg` utility locally and uses it to generate the SSL certificate required to secure the connection to the PostgreSQL instance.
## Kine Secret generation
```bash
$ make postgresql-kine-secret
```
Generate the Kine secret required for Kamaji.
> Requires the generation of the `postgresql-secret`
## Teardown
```bash
$ make postgresql-destroy
```
This deletes the `cloudnative-pg` Operator, along with any instances and the related Secrets.
This action is idempotent.

View File

@@ -0,0 +1,13 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgresql
spec:
description: PostgreSQL cluster used by Kamaji along with kine
instances: 3
postgresql:
pg_hba:
- hostssl app all all cert # makes authentication entirely based on certificates
primaryUpdateStrategy: unsupervised
storage:
size: 1Gi

deploy/kine/secret.yaml Normal file
View File

@@ -0,0 +1,14 @@
# secret.yaml is the Secret object that Kamaji expects in order to connect to the Kine SQL datastore:
# the certificate keys, the username, and the password are all required.
apiVersion: v1
kind: Secret
data:
ca.crt: ${CA}
server.crt: ${CRT}
server.key: ${KEY}
username: ${ROOT_USERNAME}
password: ${ROOT_PASSWORD}
metadata:
creationTimestamp: null
name: kine-secret
type: kamaji.clastix.io/kine

View File

@@ -1,31 +0,0 @@
mariadb_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
.PHONY: mariadb mariadb-certificates mariadb-secrets
mariadb: mariadb-certificates mariadb-secrets mariadb-deployment
mariadb-certificates:
rm -rf $(mariadb_path)/certs && mkdir $(mariadb_path)/certs
cfssl gencert -initca $(mariadb_path)/ca-csr.json | cfssljson -bare $(mariadb_path)/certs/ca
@mv $(mariadb_path)/certs/ca.pem $(mariadb_path)/certs/ca.crt
@mv $(mariadb_path)/certs/ca-key.pem $(mariadb_path)/certs/ca.key
cfssl gencert -ca=$(mariadb_path)/certs/ca.crt -ca-key=$(mariadb_path)/certs/ca.key \
-config=$(mariadb_path)/config.json -profile=server \
$(mariadb_path)/server-csr.json | cfssljson -bare $(mariadb_path)/certs/server
@mv $(mariadb_path)/certs/server.pem $(mariadb_path)/certs/server.crt
@mv $(mariadb_path)/certs/server-key.pem $(mariadb_path)/certs/server.key
chmod 644 $(mariadb_path)/certs/*
mariadb-secrets:
@kubectl -n kamaji-system create secret generic mysql-config \
--from-file=$(mariadb_path)/certs/ca.crt --from-file=$(mariadb_path)/certs/ca.key \
--from-file=$(mariadb_path)/certs/server.key --from-file=$(mariadb_path)/certs/server.crt \
--from-file=$(mariadb_path)/mysql-ssl.cnf \
--from-literal=MYSQL_ROOT_PASSWORD=root
mariadb-deployment:
@kubectl -n kamaji-system apply -f $(mariadb_path)/mariadb.yaml
destroy:
@kubectl delete -n kamaji-system -f $(mariadb_path)/mariadb.yaml
@kubectl delete -n kamaji-system secret mysql-config

View File

@@ -1,43 +0,0 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `ETCD` thanks to [kine](https://github.com/k3s-io/kine). One of the implementations is [MySQL](https://www.mysql.com/).
Kamaji project is developed using [kind](https://kind.sigs.k8s.io), therefore, MySQL (or [MariaDB](https://mariadb.org/) in this case) will be deployed into the local kubernetes cluster in order to be used as storage for the tenants.
There is a Makefile to help with the process:
* **Full Installation**
```bash
$ make mariadb
```
This action will perform all the necessary stuffs to have MariaDB as kubernetes storage backend using kine.
* **Certificate creation**
```bash
$ make mariadb-certificates
```
Communication between kine and the backend is encrypted, therefore, some certificates must be created.
* **Secret Deployment**
```bash
$ make mariadb-secrets
```
Previous certificates and MySQL configuration have to be available in order to be used. They will be under the secret `kamaji-system:mysql-config`.
* **Deployment**
```bash
$ make mariadb-deployment
```
* **Uninstall Everything**
```bash
$ make destroy
```

deploy/nodes-prerequisites.sh Normal file → Executable file
View File

@@ -18,7 +18,9 @@ EOF
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'sudo apt update && sudo apt install -y containerd'
ssh ${USER}@${HOST} -t 'sudo systemctl start containerd && sudo systemctl enable containerd'
ssh ${USER}@${HOST} -t 'sudo mkdir -p /etc/containerd'
ssh ${USER}@${HOST} -t 'containerd config default | sed -e "s#SystemdCgroup = false#SystemdCgroup = true#g" | sudo tee -a /etc/containerd/config.toml'
ssh ${USER}@${HOST} -t 'sudo systemctl restart containerd && sudo systemctl enable containerd'
scp containerd.conf ${USER}@${HOST}:
ssh ${USER}@${HOST} -t 'sudo chown -R root:root containerd.conf && sudo mv containerd.conf /etc/modules-load.d/containerd.conf'
ssh ${USER}@${HOST} -t 'sudo modprobe overlay && sudo modprobe br_netfilter'

View File

@@ -21,10 +21,12 @@ runcmd:
- sudo modprobe overlay
- sudo modprobe br_netfilter
- sudo sysctl --system
- sudo systemctl start containerd
- sudo mkdir -p /etc/containerd
- containerd config default | sed -e 's#SystemdCgroup = false#SystemdCgroup = true#g' | sudo tee -a /etc/containerd/config.toml
- sudo systemctl restart containerd
- sudo systemctl enable containerd
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- sudo apt update
- sudo apt install -y kubelet kubeadm kubectl containerd
- sudo apt-mark hold kubelet kubeadm kubectl
- sudo apt install -y kubelet=1.23.5-00 kubeadm=1.23.5-00 kubectl=1.23.5-00
- sudo apt-mark hold kubelet kubeadm kubectl

View File

@@ -1,10 +1,13 @@
# Kamaji documentation
- [Core Concepts](./concepts.md)
- [Architecture](./architecture.md)
- [Concepts](./concepts.md)
- [Getting started](./getting-started-with-kamaji.md)
- [Kamaji Deployment](./kamaji-deployment-guide.md)
- [Tenant deployment](./kamaji-tenant-deployment-guide.md)
- Deployment on cloud providers:
- [Azure](./kamaji-azure-deployment-guide.md)
- Guides:
- [Deploy Kamaji](./kamaji-deployment-guide.md)
- [Deploy Kamaji on Azure](./kamaji-azure-deployment-guide.md)
- Deploy Kamaji on AWS
- Deploy Kamaji on GCP
- Deploy Kamaji on OpenStack
- [Reference](./reference.md)
- [Versioning](./versioning.md)

View File

@@ -1 +1,31 @@
# Kamaji concepts
# Core Concepts
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on the metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` in short.
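As an illustration, a minimal `TenantControlPlane` manifest, adapted from the sample manifests elsewhere in this repository (names and versions are illustrative), looks roughly like this:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: tenants
spec:
  controlPlane:
    deployment:
      replicas: 2            # the Tenant Control Plane pods
    service:
      serviceType: LoadBalancer
  kubernetes:
    version: "v1.23.5"
    kubelet:
      cgroupfs: systemd
  networkProfile:
    port: 6443               # port exposing the tenant API server
  addons:
    coreDNS: {}
    kubeProxy: {}
```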
## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you would expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
## Save the state
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state, to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service, because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time. As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we deploy a multi-tenant `etcd` cluster storing the state of multiple tenant clusters.
With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goal of resiliency and cost optimization. The trade-off here is that we have to operate an external `etcd` cluster, in addition to the `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its own data. Also, there are size limits in `etcd`, defaulting to 2GB and configurable up to a maximum of 8GB. We're solving this issue by pooling multiple `etcd` clusters together and sharding the Tenant Control Planes.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` to save the state of the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration.
## Requirements of design
These are requirements of design behind Kamaji:
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
- Communication between different _“tenant clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
Goals and scope may vary as the project evolves.

View File

@@ -49,54 +49,17 @@ At this moment you will have your KinD up and running and ETCD cluster in multit
Now you're ready to [install Kamaji operator](#install-kamaji).
#### Kine MySQL
#### Kine
> The MySQL-compatible cluster provisioning is omitted here.
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like MySQL compatible databases.
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like MySQL or PostgreSQL compatible databases.
Once a compatible-mysql database is running, we need to provide information about it to kamaji:
Read more about it in the provided [guide](../deploy/kine/README.md).
```
--etcd-storage-type=kine-mysql
--kine-mysql-host=<database host>
--kine-mysql-port=<database port>
--kine-mysql-secret-name=<secret name>
--kine-mysql-secret-namespace=<secret namespace>
```
The secret with the configuration and certificates for mysql should look like:
```yaml
apiVersion: v1
data:
MYSQL_ROOT_PASSWORD: ...
ca.crt: ...
ca.key: ...
mysql-ssl.cnf: ...
server.crt: ...
server.key: ...
kind: Secret
metadata:
creationTimestamp: "2022-06-30T08:03:15Z"
name: mysql-config
namespace: kamaji-system
resourceVersion: "32228"
uid: 51b155a1-426c-42d2-8147-be680bf458a6
type: Opaque
```
and `mysql-ssl.cnf`:
```
[mysqld]
ssl-ca=/etc/mysql/conf.d/ca.crt
ssl-cert=/etc/mysql/conf.d/server.crt
ssl-key=/etc/mysql/conf.d/server.key
require_secure_transport=ON
```
You can read more about it [here](../deploy/mysql/README.md).
Assuming you adjusted the [Kamaji manifest](./config/install.yaml) to connect to the MySQL-compatible database, you can now install it.
Assuming you adjusted the [Kamaji manifest](./config/install.yaml) to connect to Kine and a compatible database using the proper driver, you can now install it.
### Install Kamaji
@@ -134,8 +97,6 @@ spec:
tenant.clastix.io: tenant1
kind.clastix.io: service
serviceType: NodePort
ingress:
enabled: false
kubernetes:
version: "v1.23.4"
kubelet:

View File

@@ -1,42 +1,48 @@
# Setup Kamaji on Azure
In this section, we're going to setup Kamaji on MS Azure:
This guide will lead you through the process of creating a working Kamaji setup on MS Azure. It requires:
- one bootstrap local workstation
- a regular AKS cluster as Kamaji Admin Cluster
- a multi-tenant etcd internal cluster running on AKS
- an arbitrary number of Azure virtual machines hosting `Tenant`s' workloads
- an AKS Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of Azure virtual machines to host `Tenant`s' workloads
## Bootstrap machine
This getting started guide is supposed to be run from a remote or local bootstrap machine.
First, prepare the workspace directory:
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
```
## Prepare the bootstrap workspace
This guide is supposed to be run from a remote or local bootstrap machine. First, clone the repo and prepare the workspace directory:
```bash
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
1. Follow the instructions in [Prepare the bootstrap workspace](./getting-started-with-kamaji.md#prepare-the-bootstrap-workspace).
2. Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli).
3. Make sure you have a valid Azure subscription
4. Login to Azure:
We assume you have installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
Make sure you have a valid Azure subscription, and login to Azure:
```bash
az login
az account set --subscription "MySubscription"
```
> Currently, the Kamaji setup, including Admin and Tenant clusters need to be deployed within the same Azure region. Cross-regions deployments are not (yet) validated.
> Currently, the Kamaji setup, including Admin and Tenant clusters need to be deployed within the same Azure region. Cross-regions deployments are not supported.
## Setup Admin cluster on AKS
Throughout the instructions, shell variables are used to indicate values that you should adjust to your own Azure environment:
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as the management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of the entire Kamaji setup, including all Tenant clusters. For this guide, we're going to use an instance of Azure Kubernetes Service - AKS as the Admin Cluster.
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own Azure environment:
```bash
source kamaji-azure.env
```
> we use the Azure CLI to setup the Kamaji Admin cluster on AKS.
```
az group create \
--name $KAMAJI_RG \
--location $KAMAJI_REGION
@@ -66,34 +72,50 @@ And check you can access:
kubectl cluster-info
```
## Setup internal multi-tenant etcd
Follow the instructions [here](./kamaji-deployment-guide.md#setup-internal-multi-tenant-etcd).
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install Kamaji controller
Follow the instructions [here](./kamaji-deployment-guide.md#install-kamaji-controller).
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters.
Install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence.
The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`.
However, an externally managed `etcd` is highly recommended.
If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.
## Create Tenant Clusters
To create a Tenant Cluster in Kamaji on AKS, we have to work on both the Kamaji and Azure infrastructure sides.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/README.md).
```
source kamaji-tenant-azure.env
### Install with Helm Chart
Install with the `helm` in a dedicated namespace of the Admin cluster:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
```
### On Kamaji side
The Kamaji controller and the multi-tenant `etcd` are now running:
```bash
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 120m
etcd-1 1/1 Running 0 120m
etcd-2 1/1 Running 0 119m
kamaji-857fcdf599-4fb2p 2/2 Running 0 120m
```
You just turned your AKS cluster into a Kamaji cluster to run multiple Tenant Control Planes.
## Create Tenant Cluster
### Tenant Control Plane
With Kamaji on AKS, the tenant control plane is accessible:
- from tenant work nodes through an internal loadbalancer as `https://${TENANT_ADDR}:6443`
- from tenant admin user through an external loadbalancer `https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com:443`
- from tenant worker nodes through an internal loadbalancer
- from the tenant admin user through an external loadbalancer responding to `https://${TENANT_NAME}.${TENANT_DOMAIN}:443`
Where `TENANT_ADDR` is the Azure internal IP address assigned to the LoadBalancer service created by Kamaji to expose the Tenant Control Plane endpoint.
#### Create the Tenant Control Plane
Create the manifest for Tenant Control Plane:
Create an example Tenant Control Plane:
```yaml
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml <<EOF
---
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
@@ -102,24 +124,37 @@ metadata:
spec:
controlPlane:
deployment:
replicas: 2
replicas: 3
additionalMetadata:
annotations:
environment.clastix.io: ${TENANT_NAME}
labels:
tenant.clastix.io: ${TENANT_NAME}
kind.clastix.io: deployment
extraArgs:
apiServer: []
controllerManager: []
scheduler: []
resources:
apiServer:
requests:
cpu: 500m
memory: 512Mi
limits: {}
controllerManager:
requests:
cpu: 250m
memory: 256Mi
limits: {}
scheduler:
requests:
cpu: 250m
memory: 256Mi
limits: {}
service:
additionalMetadata:
annotations:
environment.clastix.io: ${TENANT_NAME}
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
labels:
tenant.clastix.io: ${TENANT_NAME}
kind.clastix.io: service
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
serviceType: LoadBalancer
ingress:
enabled: false
kubernetes:
version: ${TENANT_VERSION}
kubelet:
@@ -128,9 +163,9 @@ spec:
- ResourceQuota
- LimitRanger
networkProfile:
port: 6443
port: ${TENANT_PORT}
certSANs:
- ${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com
- ${TENANT_NAME}.${TENANT_DOMAIN}
serviceCidr: ${TENANT_SVC_CIDR}
podCidr: ${TENANT_POD_CIDR}
dnsServiceIPs:
@@ -138,6 +173,13 @@ spec:
addons:
coreDNS: {}
kubeProxy: {}
konnectivity:
proxyPort: ${TENANT_PROXY_PORT}
resources:
requests:
cpu: 100m
memory: 128Mi
limits: {}
---
apiVersion: v1
kind: Service
@@ -155,49 +197,24 @@ spec:
kamaji.clastix.io/soot: ${TENANT_NAME}
type: LoadBalancer
EOF
```
Make sure:
- the `tcp.spec.controlPlane.service.serviceType=LoadBalancer` and the following annotation: `service.beta.kubernetes.io/azure-load-balancer-internal=true` is set. This tells AKS to expose the service within an Azure internal loadbalancer.
- the public loadbalancer service has the following annotation: `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` to expose the Tenant Control Plane with domain name: `${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com`.
Create the Tenant Control Plane
```
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
And check it out:
Make sure:
```
$ kubectl get tcp
NAME VERSION CONTROL-PLANE-ENDPOINT KUBECONFIG PRIVATE AGE
tenant-00 v1.23.4 10.240.0.100:6443 tenant-00-admin-kubeconfig true 46m
- the following annotation: `service.beta.kubernetes.io/azure-load-balancer-internal=true` is set on the `tcp` service. It tells Azure to expose the service within an internal loadbalancer.
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
tenant-00 LoadBalancer 10.0.223.161 10.240.0.100 6443:31902/TCP 46m
tenant-00-public LoadBalancer 10.0.205.97 20.101.215.149 443:30697/TCP 19h
- the following annotation: `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with the domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
tenant-00 2/2 2 2 47m
```
### Working with Tenant Control Plane
Collect the internal IP address of Azure loadbalancer where the Tenant control Plane is exposed:
```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."status.loadBalancer.ingress[].ip")
```
#### Working with Tenant Control Plane
Check the access to the Tenant Control Plane:
```
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/healthz
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/version
```
Let's retrieve the `kubeconfig` in order to work with it:
@@ -231,8 +248,6 @@ NAME ENDPOINTS AGE
kubernetes 10.240.0.100:6443 57m
```
Make sure it's `${TENANT_ADDR}:6443`.
### Prepare the Infrastructure for the Tenant virtual machines
Kamaji provides Control Plane as a Service, so the tenant user can join his own virtual machines as worker nodes. Each tenant can place his virtual machines in a dedicated Azure virtual network.
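For instance, a dedicated virtual network can be created with the Azure CLI; a sketch, where the vnet/subnet names and address space are illustrative assumptions:
```bash
# Create a dedicated vnet and subnet for the tenant's worker virtual machines.
az network vnet create \
  --resource-group $TENANT_RG \
  --name ${TENANT_NAME}-vnet \
  --address-prefixes 10.0.0.0/16 \
  --subnet-name ${TENANT_NAME}-subnet \
  --subnet-prefixes 10.0.0.0/24
```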
@@ -334,7 +349,6 @@ JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=
A bash loop will be used to join all the available nodes.
```bash
HOSTS=($(az vmss list-instance-public-ips \
--resource-group $TENANT_RG \
@@ -352,13 +366,12 @@ done
Checking the nodes:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME               STATUS     ROLES    AGE    VERSION
tenant-00-000000   NotReady   <none>   112s   v1.23.5
tenant-00-000002   NotReady   <none>   92s    v1.23.5
tenant-00-000003   NotReady   <none>   71s    v1.23.5
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
@@ -371,7 +384,7 @@ kubectl apply -f calico-cni/calico-azure.yaml --kubeconfig=${TENANT_NAMESPACE}-$
And after a while, `kube-system` pods will be running.
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
@@ -386,14 +399,14 @@ kube-proxy-m48v4 1/1 Running 0 3m
And the nodes will be ready
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME               STATUS   ROLES    AGE     VERSION
tenant-00-000000   Ready    <none>   3m38s   v1.23.5
tenant-00-000002   Ready    <none>   3m18s   v1.23.5
tenant-00-000003   Ready    <none>   2m57s   v1.23.5
```
## Cleanup
To get rid of the Tenant infrastructure, remove the tenant resource group:
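Assuming the tenant resources live in the resource group referenced by `$TENANT_RG` (the variable used for the worker VMs above), a sketch with the Azure CLI:
```bash
# Deleting the resource group tears down the tenant VMs, networks, and disks it contains.
az group delete --name $TENANT_RG --yes --no-wait
```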


@@ -1,603 +1,477 @@
# Install a Kamaji environment
This guide will lead you through the process of creating a basic working Kamaji setup.
Kamaji requires:
- (optional) a bootstrap node;
- a multi-tenant `etcd` cluster made of 3 nodes hosting the datastore for the `Tenant`s' clusters
- a Kubernetes cluster, running the admin and Tenant Control Planes
- an arbitrary number of machines hosting `Tenant`s' workloads
# Setup Kamaji
This guide will lead you through the process of creating a working Kamaji setup on a generic Kubernetes cluster. It requires:
- one bootstrap local workstation
- a Kubernetes cluster 1.22+, to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host Tenants' workloads
> In this guide, we assume all machines are running `Ubuntu 20.04`.
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Setup external multi-tenant etcd](#setup-external-multi-tenant-etcd)
* [Setup internal multi-tenant etcd](#setup-internal-multi-tenant-etcd)
* [Install Kamaji controller](#install-kamaji-controller)
* [Setup Tenant cluster](#setup-tenant-cluster)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
## Prepare the bootstrap workspace
This guide is supposed to be run from a remote or local bootstrap machine. First, clone the repo and prepare the workspace directory:
```bash
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
Throughout the instructions, shell variables are used to indicate values that you should adjust to your own environment.
### Install required tools
On the bootstrap machine, install all the required tools to work with a Kamaji setup.
#### cfssl and cfssljson
The `cfssl` and `cfssljson` command line utilities will be used in addition to `kubeadm` to provision the PKI Infrastructure and generate TLS certificates.
```
wget -q --show-progress --https-only --timestamping \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson
chmod +x cfssl cfssljson
sudo mv cfssl cfssljson /usr/local/bin/
```
#### Kubernetes tools
Install `kubeadm` and `kubectl`
```bash
sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && \
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list && \
sudo apt update && sudo apt install -y kubeadm kubectl --allow-change-held-packages && \
sudo apt-mark hold kubeadm kubectl
```
#### etcdctl
For administration of the `etcd` cluster, download and install the `etcdctl` CLI utility on the bootstrap machine
```bash
ETCD_VER=v3.5.1
ETCD_URL=https://storage.googleapis.com/etcd
curl -L ${ETCD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o etcd-${ETCD_VER}-linux-amd64.tar.gz
tar xzvf etcd-${ETCD_VER}-linux-amd64.tar.gz etcd-${ETCD_VER}-linux-amd64/etcdctl
sudo cp etcd-${ETCD_VER}-linux-amd64/etcdctl /usr/bin/etcdctl
rm -rf etcd-${ETCD_VER}-linux-amd64*
```
Verify the installed `etcdctl` version:
```bash
etcdctl version
etcdctl version: 3.5.1
API version: 3.5
```
We assume you have the following installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of the whole Kamaji setup, including all Tenant clusters.
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. Currently we tested:
- [Kubernetes installed with `kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
- [Azure AKS managed service](./kamaji-on-azure.md).
- [KinD for local development](./getting-started-with-kamaji.md).
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your environment:
```bash
source kamaji.env
```
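For illustration, a `kamaji.env` could define the variables used throughout this guide (the values below are assumptions to adapt):
```bash
# kamaji.env - example values only, adjust to your environment
export TENANT_NAMESPACE=tenants
export TENANT_NAME=tenant-00
export TENANT_VERSION=v1.23.5
export TENANT_PORT=6443              # tenant kube-apiserver port
export TENANT_PROXY_PORT=8132        # konnectivity server port
export TENANT_DOMAIN=clastix.labs    # domain used in certificate SANs
export TENANT_SVC_CIDR=10.96.0.0/16
export TENANT_POD_CIDR=10.36.0.0/16
export TENANT_DNS_SERVICE=10.96.0.10
```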
To work properly, the admin cluster should provide:
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenants' `etcd`.
- Support for LoadBalancer Service Type (eg. MetalLB), or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Monitoring Stack, eg. [Prometheus](https://github.com/prometheus-community).
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into a Kamaji Admin Cluster.
## Setup external multi-tenant etcd
In this section, we're going to setup a multi-tenant `etcd` cluster on dedicated nodes. Alternatively, if you want to use an internal `etcd` cluster as Kubernetes StatefulSet, jump [here](#setup-internal-multi-tenant-etcd).
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
### Ensure host access
From the bootstrap machine, load the environment for the external `etcd` setup:
```bash
source kamaji-external-etcd.env
```
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. The Helm [Chart](../charts/kamaji/) can install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence. However, an externally managed `etcd` is highly recommended; if you'd like to use an external one, specify the overrides by setting the value `etcd.deploy=false`.
Optionally, Kamaji offers the possibility of using a storage system other than `etcd` for the tenants' clusters, such as a MySQL-compatible database, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/mysql/README.md).
### Install with Helm Chart
Install with `helm` in a dedicated namespace of the Admin cluster:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
```
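If you opted for the externally managed datastore mentioned above, the same install works with the bundled `etcd` disabled (`etcd.deploy=false` is the value documented above; wiring up the external endpoints is left to the chart's other overrides):
```bash
# Install Kamaji without deploying the internal etcd StatefulSet.
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji \
  --set etcd.deploy=false
```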
The installer requires a user that has access to all hosts. In order to run the installer as a non-root user, first configure passwordless sudo rights on each host.
Generate an SSH key on the host you run the installer on:
```bash
ssh-keygen -t rsa
```
> Do not use a password.
The Kamaji controller and the multi-tenant `etcd` are now running:
```bash
kubectl -n kamaji-system get pods
NAME                      READY   STATUS    RESTARTS   AGE
etcd-0                    1/1     Running   0          120m
etcd-1                    1/1     Running   0          120m
etcd-2                    1/1     Running   0          119m
kamaji-857fcdf599-4fb2p   2/2     Running   0          120m
```
You just turned your Kubernetes cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Distribute the key to the other cluster hosts. Depending on your environment, use a bash loop.
## Create Tenant Cluster
### Tenant Control Plane
An example tenant control plane looks like:
```yaml
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: ${TENANT_NAME}
  namespace: ${TENANT_NAMESPACE}
spec:
  controlPlane:
    deployment:
      replicas: 3
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
      extraArgs:
        apiServer: []
        controllerManager: []
        scheduler: []
      resources:
        apiServer:
          requests:
            cpu: 500m
            memory: 512Mi
          limits: {}
        controllerManager:
          requests:
            cpu: 250m
            memory: 256Mi
          limits: {}
        scheduler:
          requests:
            cpu: 250m
            memory: 256Mi
          limits: {}
    service:
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
      serviceType: LoadBalancer
  kubernetes:
    version: ${TENANT_VERSION}
    kubelet:
      cgroupfs: systemd
    admissionControllers:
      - ResourceQuota
      - LimitRanger
  networkProfile:
    port: ${TENANT_PORT}
    certSANs:
      - ${TENANT_NAME}.${TENANT_DOMAIN}
    serviceCidr: ${TENANT_SVC_CIDR}
    podCidr: ${TENANT_POD_CIDR}
    dnsServiceIPs:
      - ${TENANT_DNS_SERVICE}
  addons:
    coreDNS: {}
    kubeProxy: {}
    konnectivity:
      proxyPort: ${TENANT_PROXY_PORT}
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits: {}
EOF
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
After a few minutes, check the created resources in the tenant's namespace; when ready, it will look similar to the following:
```bash
kubectl -n tenants get tcp,deploy,pods,svc
NAME VERSION STATUS CONTROL-PLANE-ENDPOINT KUBECONFIG AGE
tenantcontrolplane.kamaji.clastix.io/tenant-00 v1.23.1 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig 2m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/tenant-00 3/3 3 3 118s
NAME READY STATUS RESTARTS AGE
pod/tenant-00-58847c8cdd-7hc4n 4/4 Running 0 82s
pod/tenant-00-58847c8cdd-ft5xt 4/4 Running 0 82s
pod/tenant-00-58847c8cdd-shc7t 4/4 Running 0 82s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/TCP,8132:32713/TCP 2m20s
```
The regular Tenant Control Plane containers, `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler`, are running unchanged in the `tcp` pods instead of on dedicated machines, and they are exposed through a service on port `6443` of the worker nodes in the Admin cluster.
```yaml
apiVersion: v1
kind: Service
metadata:
  name: tenant-00
spec:
  clusterIP: 10.32.233.177
  loadBalancerIP: 192.168.32.240
  ports:
  - name: kube-apiserver
    nodePort: 31073
    port: 6443
    protocol: TCP
    targetPort: 6443
  - name: konnectivity-server
    nodePort: 32125
    port: 8132
    protocol: TCP
    targetPort: 8132
  selector:
    kamaji.clastix.io/soot: tenant-00
  type: LoadBalancer
```
The `LoadBalancer` service type is used to expose the Tenant Control Plane. However, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case. High Availability and rolling updates of the Tenant Control Plane are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
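For example, exposing the same `TenantControlPlane` through a `NodePort` service is only a change of the `serviceType` field (a sketch of the relevant fragment):
```yaml
spec:
  controlPlane:
    service:
      serviceType: NodePort
```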
### Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.
This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes. After the worker nodes have joined the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. Once the Konnectivity service is enabled, all control-plane-to-worker-node traffic goes through these connections.
> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
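A sketch of turning it off, using the `enabled` field shown for the konnectivity add-on elsewhere in these docs (verify the exact field against your Kamaji version):
```yaml
addons:
  konnectivity:
    enabled: false
```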
### Working with Tenant Control Plane
Collect the external IP address of the `tcp` service:
```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r '.spec.loadBalancerIP')
```
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  ssh-copy-id -i ~/.ssh/id_rsa.pub $HOST;
done
```
> Alternatively, inject the generated public key into machines metadata.
Confirm that you can access each host from the bootstrap machine:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  ssh ${USER}@${HOST} -t 'hostname';
done
```
and check it out:
```bash
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/healthz
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/version
```
### Configure disk layout
As per the `etcd` [requirements](https://etcd.io/docs/v3.5/op-guide/hardware/#disks), back `etcd`'s storage with an SSD. An SSD usually provides lower write latencies with less variance than a spinning disk, thus improving the stability and reliability of `etcd`.
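Optionally, you can sanity-check a disk's sync-write latency with `fio` once the disk is mounted; a sketch, with the target directory and sizes as illustrative assumptions:
```bash
# Measures fdatasync latency, the disk metric etcd is most sensitive to.
fio --name=etcd-bench --directory=/var/lib/etcd \
    --rw=write --bs=2300 --size=100m \
    --ioengine=sync --fdatasync=1
```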
For each `etcd` machine, we assume an additional `sdb` disk of 10GB:
```
clastix@kamaji-etcd-00:~$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16G 0 disk
├─sda1 8:1 0 15.9G 0 part /
├─sda14 8:14 0 4M 0 part
└─sda15 8:15 0 106M 0 part /boot/efi
sdb 8:16 0 10G 0 disk
sr0 11:0 1 4M 0 rom
```
Create partitions, format, and mount the `etcd` disk, by running the script below from the bootstrap machine:
> If you already used the `etcd` disks, please make sure to wipe the partitions with `sudo wipefs --all --force /dev/sdb` before attempting to recreate them.
```bash
for i in "${!ETCDHOSTS[@]}"; do
  HOST=${ETCDHOSTS[$i]}
  ssh ${USER}@${HOST} -t 'echo type=83 | sudo sfdisk -f -q /dev/sdb'
  ssh ${USER}@${HOST} -t 'sudo mkfs -F -q -t ext4 /dev/sdb1'
  ssh ${USER}@${HOST} -t 'sudo mkdir -p /var/lib/etcd'
  ssh ${USER}@${HOST} -t 'sudo e2label /dev/sdb1 ETCD'
  ssh ${USER}@${HOST} -t 'echo LABEL=ETCD /var/lib/etcd ext4 defaults 0 1 | sudo tee -a /etc/fstab'
  ssh ${USER}@${HOST} -t 'sudo mount -a'
  ssh ${USER}@${HOST} -t 'sudo lsblk -f'
done
```
The `kubeconfig` required to access the Tenant Control Plane is stored in a secret:
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
  | jq -r '.data["admin.conf"]' \
  | base64 -d \
  > ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
### Install prerequisites
Use the bash script `nodes-prerequisites.sh` to install all the dependencies on all the cluster nodes.
and let's check it out:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig cluster-info
Kubernetes control plane is running at https://192.168.32.240:6443
CoreDNS is running at https://192.168.32.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
```
Check out how the Tenant Control Plane advertises itself to workloads:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 6m
```
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep
NAME ENDPOINTS AGE
kubernetes 192.168.32.240:6443 18m
```
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
### Preparing Worker Nodes to join
Currently, Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other IaC tools.
Use the bash script `nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
> Warning: we assume worker nodes are machines running `Ubuntu 20.04`
Run the installation script:
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
```
### Configure kubelet
On each `etcd` node, configure the `kubelet` service to start `etcd` static pods using `containerd` as container runtime, by running the script below from the bootstrap machine:
```bash
cat << EOF > 20-etcd-service-manager.conf
[Service]
ExecStart=
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=/run/containerd/containerd.sock
Restart=always
EOF
```
```bash
for i in "${!ETCDHOSTS[@]}"; do
  HOST=${ETCDHOSTS[$i]}
  scp 20-etcd-service-manager.conf ${USER}@${HOST}:
  ssh ${USER}@${HOST} -t 'sudo chown -R root:root 20-etcd-service-manager.conf && sudo mv 20-etcd-service-manager.conf /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf'
  ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
  ssh ${USER}@${HOST} -t 'sudo systemctl start kubelet'
  ssh ${USER}@${HOST} -t 'sudo systemctl enable kubelet'
done

rm -f 20-etcd-service-manager.conf
```
### Join Command
The current approach for joining nodes is the `kubeadm` one; therefore, we create a bootstrap token to perform the action. To facilitate the step, we store the entire join command in a variable.
```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
```
### Adding Worker Nodes
### Create configuration
Create temp directories to store files that will end up on `etcd` hosts:
```bash
mkdir -p /tmp/${ETCD0}/ /tmp/${ETCD1}/ /tmp/${ETCD2}/

NAMES=("etcd00" "etcd01" "etcd02")
for i in "${!ETCDHOSTS[@]}"; do
  HOST=${ETCDHOSTS[$i]}
  NAME=${NAMES[$i]}
  cat <<EOF | sudo tee /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
  local:
    serverCertSANs:
    - "${HOST}"
    peerCertSANs:
    - "${HOST}"
    extraArgs:
      initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
      initial-cluster-state: new
      name: ${NAME}
      listen-peer-urls: https://${HOST}:2380
      listen-client-urls: https://${HOST}:2379
      advertise-client-urls: https://${HOST}:2379
      initial-advertise-peer-urls: https://${HOST}:2380
      auto-compaction-mode: periodic
      auto-compaction-retention: 5m
      quota-backend-bytes: '8589934592'
EOF
done
```
> Note:
>
> ##### Etcd compaction
>
> By enabling `etcd` authentication, the tenant apiservers (clients of `etcd`) are prevented from issuing compaction requests. We set `etcd` to automatically compact the keyspace through the `--auto-compaction-*` options, with a period of hours or minutes. With `--auto-compaction-mode=periodic`, `--auto-compaction-retention=5m`, and about 1000 writes per minute, `etcd` compacts roughly 5000 revisions every 5 minutes.
>
> ##### Etcd storage quota
>
> Currently, `etcd` storage is limited in size, with a default of `2GB`, configurable with the `--quota-backend-bytes` flag up to `8GB`. In Kamaji, we use a single `etcd` to store multiple tenant clusters, so we need to increase this size. Please note that `etcd` warns at startup if the configured value exceeds `8GB`.
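You can keep an eye on how close the datastore is to the configured quota with `etcdctl` (the `DB SIZE` column):
```bash
# Shows, per endpoint: version, db size, leader status, raft term and index.
etcdctl endpoint status -w table
```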
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create and distribute `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
For each `etcd` host:
```bash
for i in "${!ETCDHOSTS[@]}"; do
  HOST=${ETCDHOSTS[$i]}
  sudo kubeadm init phase certs etcd-server --config=/tmp/${HOST}/kubeadmcfg.yaml
  sudo kubeadm init phase certs etcd-peer --config=/tmp/${HOST}/kubeadmcfg.yaml
  sudo kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST}/kubeadmcfg.yaml
  sudo cp -R /etc/kubernetes/pki /tmp/${HOST}/
  sudo find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
done
```
A bash loop will be used to join all the available nodes.
```bash
for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  ssh ${USER}@${HOST} -t ${JOIN_CMD};
done
```
### Startup the cluster
Upload certificates on each `etcd` node and restart the `kubelet`:
```bash
for i in "${!ETCDHOSTS[@]}"; do
  HOST=${ETCDHOSTS[$i]}
  sudo chown -R ${USER}. /tmp/${HOST}
  scp -r /tmp/${HOST}/* ${USER}@${HOST}:
  ssh ${USER}@${HOST} -t 'sudo chown -R root:root pki'
  ssh ${USER}@${HOST} -t 'sudo mv pki /etc/kubernetes/'
  ssh ${USER}@${HOST} -t 'sudo kubeadm init phase etcd local --config=kubeadmcfg.yaml'
  ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
  ssh ${USER}@${HOST} -t 'sudo systemctl restart kubelet'
done
```
Checking the nodes:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME                  STATUS     ROLES    AGE   VERSION
tenant-00-worker-00   NotReady   <none>   25s   v1.23.5
tenant-00-worker-01   NotReady   <none>   17s   v1.23.5
tenant-00-worker-02   NotReady   <none>   9s    v1.23.5
```
This will start the static `etcd` pod on each node, and the cluster then forms.
Generate certificates for the `root` user
```bash
cat > root-csr.json <<EOF
{
  "CN": "root",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
```bash
cfssl gencert \
  -ca=kamaji/etcd/ca.crt \
  -ca-key=kamaji/etcd/ca.key \
  -config=cfssl-cert-config.json \
  -profile=client-authentication \
  root-csr.json | cfssljson -bare root
```
And after a while, `kube-system` pods will be running.
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-8594699699-dlhbj   1/1     Running   0          3m
calico-node-kxf6n                          1/1     Running   0          3m
calico-node-qtdlw                          1/1     Running   0          3m
coredns-64897985d-2v5lc                    1/1     Running   0          5m
coredns-64897985d-nq276                    1/1     Running   0          5m
kube-proxy-cwdww                           1/1     Running   0          3m
kube-proxy-m48v4                           1/1     Running   0          3m
```
```bash
cp root.pem kamaji/etcd/root.crt
cp root-key.pem kamaji/etcd/root.key
rm root*
```
And the nodes will be ready
```bash
kubectl get nodes --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
NAME                  STATUS   ROLES    AGE     VERSION
tenant-00-worker-00   Ready    <none>   2m48s   v1.23.5
tenant-00-worker-01   Ready    <none>   2m40s   v1.23.5
tenant-00-worker-02   Ready    <none>   2m32s   v1.23.5
```
The result should be:
```bash
$ tree kamaji
kamaji
└── etcd
    ├── ca.crt
    ├── ca.key
    ├── root.crt
    └── root.key
```
## Smoke test
The tenant cluster is now ready to accept workloads.
Export its `kubeconfig` file
```bash
export KUBECONFIG=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
Use the `root` user to check that the just-formed `etcd` cluster is in a healthy state:
```bash
export ETCDCTL_CACERT=kamaji/etcd/ca.crt
export ETCDCTL_CERT=kamaji/etcd/root.crt
export ETCDCTL_KEY=kamaji/etcd/root.key
export ETCDCTL_ENDPOINTS=https://${ETCD0}:2379

etcdctl member list -w table
```
The result should be something like this:
```
+------------------+---------+--------+----------------------------+----------------------------+------------+
|        ID        | STATUS  |  NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+--------+----------------------------+----------------------------+------------+
| 72657d6307364226 | started | etcd01 | https://192.168.32.11:2380 | https://192.168.32.11:2379 |      false |
| 91eb892c5ee87610 | started | etcd00 | https://192.168.32.10:2380 | https://192.168.32.10:2379 |      false |
| e9971c576949c34e | started | etcd02 | https://192.168.32.12:2380 | https://192.168.32.12:2379 |      false |
+------------------+---------+--------+----------------------------+----------------------------+------------+
```
#### Deployment
Deploy a `nginx` application on the tenant cluster
```bash
kubectl create deployment nginx --image=nginx
```
### Enable multi-tenancy
The `root` user, which has full access to `etcd`, must be created before enabling authentication. It must have the `root` role and is allowed to change anything inside `etcd`.
```bash
etcdctl user add --no-password=true root
etcdctl role add root
etcdctl user grant-role root root
etcdctl auth enable
```
and check the `nginx` pod gets scheduled
```bash
kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP             NODE
nginx-6799fc88d8-4sgcb   1/1     Running   0          33s   172.12.121.1   worker02
```
### Cleanup
If you want to get rid of the `etcd` cluster, for each node, login and clean it:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
```
#### Port Forwarding
Verify the ability to access applications remotely using port forwarding.
Retrieve the full name of the `nginx` pod:
```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Forward port 8080 on your local machine to port 80 of the `nginx` pod:
```bash
kubectl port-forward $POD_NAME 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```
In a new terminal make an HTTP request using the forwarding address:
```bash
curl --head http://127.0.0.1:8080
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 08:19:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
Switch back to the previous terminal and stop the port forwarding to the `nginx` pod.
#### Logs
Verify the ability to retrieve container logs.
Print the `nginx` pod logs:
```bash
kubectl logs $POD_NAME
...
127.0.0.1 - - [19/Jun/2021:08:19:01 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.68.0" "-"
```
#### Kubelet tunnel
Verify the ability to execute commands in a container.
Print the `nginx` version by executing the `nginx -v` command in the `nginx` container:
```bash
kubectl exec -ti $POD_NAME -- nginx -v
nginx version: nginx/1.21.0
```
#### Services
Verify the ability to expose applications using a service.
Expose the `nginx` deployment using a `NodePort` service:
```bash
kubectl expose deployment nginx --port 80 --type NodePort
```
Retrieve the node port assigned to the `nginx` service:
```bash
NODE_PORT=$(kubectl get svc nginx \
  --output=jsonpath='{.spec.ports[0].nodePort}')
```
Retrieve the IP address of a worker instance and make an HTTP request:
```bash
curl -I http://${WORKER0}:${NODE_PORT}
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 09:29:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
## Cleanup
Remove the worker nodes that joined the tenant control plane:
```bash
kubectl delete nodes --all --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
For each worker node, login and clean it
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
ssh ${USER}@${HOST} -t 'sudo rm -rf /etc/cni/net.d';
ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
done
```
## Setup internal multi-tenant etcd
If you opted for an internal `etcd` cluster running in the Kamaji admin cluster, follow the steps below.
From the bootstrap machine, load the environment for the internal `etcd` setup:
```bash
source kamaji-internal-etcd.env
```
Delete the tenant control plane from Kamaji:
```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create the `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
Generate the `etcd` certificates for peers:
```
cat << EOF | tee kamaji/etcd/peer-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-0",
"etcd-0.etcd",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1",
"etcd-1.etcd",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2",
"etcd-2.etcd",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc",
"etcd-2.etcd.${ETCD_NAMESPACE}.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/peer-csr.json | cfssljson -bare kamaji/etcd/peer
```
Generate the `etcd` certificates for server:
```
cat << EOF | tee kamaji/etcd/server-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-server",
"etcd-server.${ETCD_NAMESPACE}.svc",
"etcd-server.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/server-csr.json | cfssljson -bare kamaji/etcd/server
```
Generate certificates for the `root` user of the `etcd`
```
cat << EOF | tee kamaji/etcd/root-csr.json
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=client-authentication kamaji/etcd/root-csr.json | cfssljson -bare kamaji/etcd/root
```
Install the `etcd` in the Kamaji admin cluster
```bash
kubectl create namespace ${ETCD_NAMESPACE}
kubectl -n ${ETCD_NAMESPACE} create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key \
--from-file=kamaji/etcd/peer-key.pem --from-file=kamaji/etcd/peer.pem \
--from-file=kamaji/etcd/server-key.pem --from-file=kamaji/etcd/server.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-cluster.yaml
```
Install an `etcd` client to interact with the `etcd` server
```bash
kubectl -n ${ETCD_NAMESPACE} create secret tls root-client-certs \
--key=kamaji/etcd/root-key.pem \
--cert=kamaji/etcd/root.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-client.yaml
```
Wait until the `etcd` instances discover each other and the cluster is formed:
```bash
kubectl -n ${ETCD_NAMESPACE} wait pod --for=condition=ready -l app=etcd --timeout=120s
echo -e "\nChecking endpoint's health..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c 'until etcdctl endpoint health 1>/dev/null 2>/dev/null; do sleep 10; printf "."; done'
echo -e "\netcd cluster's health:"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl endpoint health"
echo -e "\nWaiting for all members..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c 'until [ $(etcdctl member list 2>/dev/null | wc -l) -eq 3 ]; do sleep 10; printf "."; done'
echo -e "\netcd's members:"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl member list -w table"
```
### Enable multi-tenancy
The `root` user, which has full access to `etcd`, must be created before enabling authentication. It must have the `root` role and is allowed to change anything inside `etcd`.
```bash
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user add --no-password=true root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl role add root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user grant-role root root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl auth enable
```
## Install Kamaji controller
Currently, the behaviour of the Kamaji controller for Tenant Control Plane is controlled by (in this order):
- CLI flags
- Environment variables
- Configuration file `kamaji.yaml` built into the image
By default, Kamaji searches for the configuration file and uses the parameters found inside it. Any environment variables that are set override the configuration file parameters, and any CLI flags that are passed override both the environment variables and the configuration file.
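For example, with the deployment patch shown later in this guide, a flag appended to the manager's arguments wins over the same setting coming from an environment variable or from the built-in `kamaji.yaml` (the flag is taken from the configuration reference; the scenario is illustrative):
```yaml
containers:
- name: manager
  args:
  - --etcd-compaction-interval=5m0s  # CLI flag: takes precedence over env vars and kamaji.yaml
```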
There are multiple ways to deploy the Kamaji controller:
- Use the single YAML file installer
- Use Kustomize with Makefile
- Use the Kamaji Helm Chart
The Kamaji controller needs to access the multi-tenant `etcd` in order to provision access for the tenants' `kube-apiserver`s.
Create the secrets containing the `etcd` certificates
```bash
kubectl create namespace kamaji-system
kubectl -n kamaji-system create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key
kubectl -n kamaji-system create secret tls root-client-certs \
--cert=kamaji/etcd/root.crt \
--key=kamaji/etcd/root.key
```
### Install with a single manifest
Install with the single YAML file installer:
```bash
kubectl -n kamaji-system apply -f ../config/install.yaml
```
Make sure to patch the `etcd` endpoints of the Kamaji controller, according to your environment:
```bash
cat > patch-deploy.yaml <<EOF
spec:
  template:
    spec:
      containers:
      - name: manager
        args:
        - --health-probe-bind-address=:8081
        - --metrics-bind-address=127.0.0.1:8080
        - --leader-elect
        - --etcd-endpoints=${ETCD0}:2379,${ETCD1}:2379,${ETCD2}:2379
EOF
kubectl -n kamaji-system patch \
  deployment kamaji-controller-manager \
  --patch-file patch-deploy.yaml
```
The Kamaji Tenant Control Plane controller is now running on the Admin Cluster:
```bash
kubectl -n kamaji-system get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
operator-controller-manager 1/1 1 1 14h
```
## Setup Tenant Cluster
You now have an Admin Cluster able to run multiple Tenant Control Planes, deployed by the Kamaji controller. Please refer to the Kamaji Tenant Deployment [guide](./kamaji-tenant-deployment-guide.md).


@@ -1,382 +0,0 @@
# Kamaji Tenant Deployment Guide
This guide defines the necessary actions to create a Kubernetes tenant cluster, made of a virtual Kubernetes control plane deployed by Kamaji and a pool of worker nodes joined to it to run workloads.
## Requirements
* [Kubernetes](https://kubernetes.io) Admin Cluster having [Kamaji](./getting-started-with-kamaji.md) installed.
* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
* [jq](https://stedolan.github.io/jq/)
## Tenant Control Plane
Kamaji offers a [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing tenant control planes. This *CRD* is called `TenantControlPlane`, or `tcp` in short.
Use the command `kubectl explain tcp.spec` to understand the fields and their usage.
### Variable Definitions
Throughout the instructions, shell variables are used to indicate values that you should adjust to your own environment:
```bash
source kamaji-tenant.env
```
### Creation
Create an example tenant control plane
```yaml
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: ${TENANT_NAME}
namespace: ${TENANT_NAMESPACE}
spec:
controlPlane:
deployment:
replicas: 2
additionalMetadata:
annotations:
environment.clastix.io: ${TENANT_NAME}
labels:
tenant.clastix.io: ${TENANT_NAME}
kind.clastix.io: deployment
service:
additionalMetadata:
annotations:
environment.clastix.io: ${TENANT_NAME}
labels:
tenant.clastix.io: ${TENANT_NAME}
kind.clastix.io: service
serviceType: LoadBalancer
ingress:
enabled: false
kubernetes:
version: ${TENANT_VERSION}
kubelet:
cgroupfs: systemd
admissionControllers:
- ResourceQuota
- LimitRanger
networkProfile:
address: ${TENANT_ADDR}
port: ${TENANT_PORT}
certSANs:
- ${TENANT_NAME}.${TENANT_DOMAIN}
serviceCidr: ${TENANT_SVC_CIDR}
podCidr: ${TENANT_POD_CIDR}
dnsServiceIPs:
- ${TENANT_DNS_SERVICE}
addons:
coreDNS: {}
kubeProxy: {}
EOF
```
If workers are not reachable from the tenant control plane, konnectivity can be enabled (it is enabled by default):
```yaml
...
addons:
  konnectivity:
    enabled: true
    proxyHost: "172.18.0.2"
    proxyPort: 31132
...
```
`proxyHost` is the address where the konnectivity proxy server will be reachable. Konnectivity runs as a sidecar container in the tenant control plane pod. If no value is specified, it defaults to the tenant address.
`proxyPort` is the port where the konnectivity proxy server will be listening (default `8132`).
```bash
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
A tenant control plane is now running as a deployment, and it is exposed through a service.
Check that the tenant control plane is reachable and healthy:
```bash
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/healthz
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/version
```
The tenant control plane components, i.e. `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager`, are running as containers in the same pod. The `kube-scheduler` and `kube-controller-manager` connect to the `kube-apiserver` through localhost: `https://127.0.0.1:${TENANT_PORT}`.
Let's retrieve the `kubeconfig` files in order to check:
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-scheduler-kubeconfig -o json \
| jq -r '.data["scheduler.conf"]' \
| base64 -d \
> ${TENANT_NAMESPACE}-${TENANT_NAME}-scheduler.kubeconfig
```
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-controller-manager-kubeconfig -o json \
| jq -r '.data["controller-manager.conf"]' \
| base64 -d \
> ${TENANT_NAMESPACE}-${TENANT_NAME}-controller-manager.kubeconfig
```
## Working with Tenant Control Plane
A new Tenant cluster is now available, but it will not be useful until worker nodes are joined to it.
### Getting Tenant Control Plane Kubeconfig
Let's retrieve the `kubeconfig` in order to work with the tenant control plane.
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d \
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
and let's check it out:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 6m
```
Check out how the Tenant Control Plane advertises itself to workloads:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep
NAME ENDPOINTS AGE
kubernetes 192.168.32.150:6443 18m
```
Make sure it's `${TENANT_ADDR}:${TENANT_PORT}`.
### Preparing Worker Nodes to join
Currently, Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other IaC tools.
Use bash script `nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
> Warning: we assume worker nodes are machines running `Ubuntu 20.04`
Run the installation script:
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2} ${WORKER3})
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
```
### Join Command
The current approach for joining nodes is the `kubeadm` one; therefore, we will create a bootstrap token to perform the action. To facilitate the step, we will store the entire join command in a variable.
```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
```
### Adding Worker Nodes
A bash loop will be used to join all the available nodes.
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2} ${WORKER3})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t ${JOIN_CMD};
done
```
Checking the nodes:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
kamaji-tenant-worker-00 NotReady <none> 1m v1.23.4
kamaji-tenant-worker-01 NotReady <none> 1m v1.23.4
kamaji-tenant-worker-02 NotReady <none> 1m v1.23.4
kamaji-tenant-worker-03 NotReady <none> 1m v1.23.4
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
And after a while, `kube-system` pods will be running.
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
calico-node-kxf6n 1/1 Running 0 3m
calico-node-qtdlw 1/1 Running 0 3m
coredns-64897985d-2v5lc 1/1 Running 0 5m
coredns-64897985d-nq276 1/1 Running 0 5m
kube-proxy-cwdww 1/1 Running 0 3m
kube-proxy-m48v4 1/1 Running 0 3m
```
And the nodes will be ready
```bash
kubectl get nodes --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
NAME STATUS ROLES AGE VERSION
kamaji-tenant-worker-01 Ready <none> 10m v1.23.4
kamaji-tenant-worker-02 Ready <none> 10m v1.23.4
```
## Smoke test
The tenant cluster is now ready to accept workloads.
Export its `kubeconfig` file
```bash
export KUBECONFIG=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
#### Deployment
Deploy a `nginx` application on the tenant cluster
```bash
kubectl create deployment nginx --image=nginx
```
and check the `nginx` pod gets scheduled
```bash
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx-6799fc88d8-4sgcb 1/1 Running 0 33s 172.12.121.1 worker02
```
#### Port Forwarding
Verify the ability to access applications remotely using port forwarding.
Retrieve the full name of the `nginx` pod:
```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Forward port 8080 on your local machine to port 80 of the `nginx` pod:
```bash
kubectl port-forward $POD_NAME 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```
In a new terminal make an HTTP request using the forwarding address:
```bash
curl --head http://127.0.0.1:8080
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 08:19:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
Switch back to the previous terminal and stop the port forwarding to the `nginx` pod.
#### Logs
Verify the ability to retrieve container logs.
Print the `nginx` pod logs:
```bash
kubectl logs $POD_NAME
...
127.0.0.1 - - [19/Jun/2021:08:19:01 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.68.0" "-"
```
#### Kubelet tunnel
Verify the ability to execute commands in a container.
Print the `nginx` version by executing the `nginx -v` command in the `nginx` container:
```bash
kubectl exec -ti $POD_NAME -- nginx -v
nginx version: nginx/1.21.0
```
#### Services
Verify the ability to expose applications using a service.
Expose the `nginx` deployment using a `NodePort` service:
```bash
kubectl expose deployment nginx --port 80 --type NodePort
```
Retrieve the node port assigned to the `nginx` service:
```bash
NODE_PORT=$(kubectl get svc nginx \
  --output=jsonpath='{.spec.ports[0].nodePort}')
```
Retrieve the IP address of a worker instance and make an HTTP request:
```bash
curl -I http://${WORKER0}:${NODE_PORT}
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 09:29:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
## Cleanup Tenant cluster
Remove the worker nodes that joined the tenant control plane:
```bash
kubectl delete nodes --all --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
For each worker node, login and clean it
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2} ${WORKER3})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
ssh ${USER}@${HOST} -t 'sudo rm -rf /etc/cni/net.d';
ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
done
```
Delete the tenant control plane from kamaji
```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```


@@ -1,12 +1,12 @@
## Configuration
Currently **Kamaji** supports (in this order):
* CLI flags
* Environment variables
* Configuration files
By default, **Kamaji** searches for the configuration file (`kamaji.yaml`) and uses the parameters found inside it. Any environment variables that are set override the configuration file parameters, and any CLI flags that are passed override both the environment variables and the configuration file.
This is easily explained in this way:
@@ -22,12 +22,12 @@ Available flags are the following:
--etcd-client-secret-namespace Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji")
--etcd-compaction-interval ETCD Compaction interval (i.e. "5m0s"). (default: "0" (disabled))
--etcd-endpoints Comma-separated list with ETCD endpoints (i.e. https://etcd-0.etcd.kamaji.svc.cluster.local,https://etcd-1.etcd.kamaji.svc.cluster.local,https://etcd-2.etcd.kamaji.svc.cluster.local)
--etcd-storage-type ETCD Storage type (i.e. "etcd", "kine-mysql"). (default: "etcd")
--etcd-storage-type ETCD Storage type (i.e. "etcd", "kine-mysql", "kine-postgresql"). (default: "etcd")
--health-probe-bind-address string The address the probe endpoint binds to. (default ":8081")
--kine-mysql-host Host where MySQL is running (default: "localhost")
--kine-mysql-port int Port where MySQL is running (default: 3306)
--kine-mysql-secret-name Name of the secret where the necessary configuration and certificates are. (default: "mysql-config")
--kine-mysql-secret-name Name of the namespace of the secret where the necessary configuration and certificates are. (default: "kamaji-system")
--kine-port int Port where the DB used by Kine is listening to.
--kine-host string Host where the DB used by Kine is working.
--kine-secret-name Name of the secret which contains the Kine configuration. (default: "kine-secret")
--kine-secret-name Name of the namespace where the secret which contains the Kine configuration. (default: "kamaji-system")
--kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
--leader-elect Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.
--metrics-bind-address string The address the metric endpoint binds to. (default ":8080")
@@ -92,12 +92,13 @@ $ make yaml-installation-file
It will generate a yaml installation file at `config/install.yaml`. It should be customized accordingly.
## Tenant Control Planes
**Kamaji** offers a [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing tenant control planes. This *CRD* is called `TenantControlPlane`, or `tcp` in short. Use the command `kubectl explain tcp.spec` to understand the fields and their usage.
### Add-ons
**Kamaji** provides optional installations into the deployed tenant control plane through add-ons. It is possible to enable/disable them through the `tcp` definition.
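As a sketch, an add-on is deployed when its key appears under `addons` in the `tcp` spec (field names as used in these docs; treat the exact enable/disable semantics as an assumption for your Kamaji version):
```yaml
spec:
  addons:
    coreDNS: {}   # deployed
    kubeProxy: {} # deployed
    # konnectivity omitted here: the add-on is not configured
```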
### Core DNS
@@ -117,11 +118,15 @@ addons:
```yaml
addons:
  konnectivity:
    proxyPort: 31132 # mandatory
    proxyHost: "172.18.0.2"
    allowAddressAsExternalIP: false
    serviceType: NodePort # mandatory
    version: v0.0.31
    serverImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server
    agentImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
```
```yaml
addons:
  konnectivity:
    proxyPort: 31132 # mandatory
    version: v0.0.31
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 100m
        memory: 128Mi
    serverImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server
    agentImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
```

docs/versioning.md Normal file

@@ -0,0 +1,8 @@
# Versioning and support
In Kamaji, there are different components that might require independent versioning and support level:
|Kamaji|Admin Cluster|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0+ |1.21.0 |1.23.x |0.0.32 |3.5.4 |
Other combinations might work but have not been tested.


@@ -28,9 +28,6 @@ var _ = Describe("Deploy a TenantControlPlane resource", func() {
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
},
Ingress: kamajiv1alpha1.IngressSpec{
Enabled: false,
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},


@@ -47,9 +47,6 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
},
Ingress: kamajiv1alpha1.IngressSpec{
Enabled: false,
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "NodePort",
},


@@ -41,9 +41,6 @@ var _ = Describe("validating kubeconfig", func() {
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
},
Ingress: kamajiv1alpha1.IngressSpec{
Enabled: false,
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "NodePort",
},

go.mod

@@ -4,8 +4,10 @@ go 1.18
require (
github.com/go-logr/logr v1.2.0
github.com/go-pg/pg/v10 v10.10.6
github.com/go-sql-driver/mysql v1.6.0
github.com/google/uuid v1.3.0
github.com/json-iterator/go v1.1.12
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
github.com/pkg/errors v0.9.1
@@ -70,6 +72,7 @@ require (
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/go-pg/zerochecker v0.2.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
@@ -84,8 +87,8 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lithammer/dedent v1.1.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
@@ -117,6 +120,11 @@ require (
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/stretchr/testify v1.7.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/vmihailenco/bufpool v0.1.11 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect
github.com/vmihailenco/tagparser v0.1.2 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.1 // indirect
go.opencensus.io v0.23.0 // indirect
@@ -148,6 +156,7 @@ require (
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/system-validators v1.6.0 // indirect
mellium.im/sasl v0.3.0 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/kustomize/api v0.10.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect

go.sum

@@ -413,6 +413,10 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
github.com/go-pg/pg/v10 v10.10.6 h1:1vNtPZ4Z9dWUw/TjJwOfFUbF5nEq1IkR6yG8Mq/Iwso=
github.com/go-pg/pg/v10 v10.10.6/go.mod h1:GLmFXufrElQHf5uzM3BQlcfwV3nsgnHue5uzjQ6Nqxg=
github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU=
github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
@@ -591,6 +595,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -723,6 +729,7 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
@@ -909,6 +916,8 @@ github.com/testcontainers/testcontainers-go v0.13.0 h1:OUujSlEGsXVo/ykPVZk3KanBN
github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -920,6 +929,14 @@ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:tw
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94=
github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ=
github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc=
github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc=
github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
@@ -995,6 +1012,7 @@ go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1010,6 +1028,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa h1:idItI2DDfCokpg0N51B2VtiLdJ4vAuXC9fnCb2gACo4=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1249,6 +1268,7 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210923061019-b8560ed6a9b7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1626,6 +1646,9 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ=
mellium.im/sasl v0.3.0 h1:0qoaTCTo5Py7u/g0cBIQZcMOgG/5LM71nshbXwznBh8=
mellium.im/sasl v0.3.0/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=


@@ -1,15 +0,0 @@
# Deploy with Helm
## Pre-requisites
1. Deploy a [multi-tenant Etcd cluster](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd)
```
make -C ../deploy/etcd
```
## Install
```
helm upgrade --install --namespace kamaji-system --create-namespace kamaji ./kamaji
```


@@ -1,33 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
### Pre-requisites
Kamaji requires a [multi-tenant etcd cluster](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd).
Starting from v0.1.1, the Helm chart takes care of installing and provisioning one, in order to streamline local testing.
> For production use, an externally managed etcd is highly recommended; the etcd addon offered by this chart is not considered production-grade.
If you'd like to use an externally managed etcd instance, you can opt out of the bundled one by setting the value `etcd.deploy=false`, along with any other overrides you need (see the sketch below).
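A minimal sketch of such an install, using only the documented `etcd.deploy` key (endpoint and certificate overrides are chart-specific and intentionally omitted here):
```console
helm upgrade --install --namespace kamaji-system --create-namespace kamaji . \
  --set etcd.deploy=false
```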
### Install Kamaji
To install the chart with the release name `kamaji`:
```console
helm upgrade --install --namespace kamaji-system --create-namespace kamaji .
```
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}


@@ -40,11 +40,13 @@ const (
schedulerKubeconfig
controllerManagerKubeconfig
kineConfig
kineCerts
)
const (
apiServerFlagsAnnotation = "kube-apiserver.kamaji.clastix.io/args"
kineVolumeName = "kine-config"
kineVolumeCertName = "kine-certs"
)
type Deployment struct {
@@ -592,18 +594,37 @@ func (d *Deployment) buildKineVolume(podSpec *corev1.PodSpec, tcp *kamajiv1alpha
podSpec.Volumes = volumes
}
if d.ETCDStorageType == types.KineMySQL {
if found, index := utilities.HasNamedVolume(podSpec.Volumes, kineVolumeCertName); found {
var volumes []corev1.Volume
volumes = append(volumes, podSpec.Volumes[:index]...)
volumes = append(volumes, podSpec.Volumes[index+1:]...)
podSpec.Volumes = volumes
}
if d.ETCDStorageType == types.KineMySQL || d.ETCDStorageType == types.KinePostgreSQL {
if index := int(kineConfig) + 1; len(podSpec.Volumes) < index {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
}
// Adding the volume to read Kine certificates:
// their permissions must subsequently be fixed with a chmod, since the PostgreSQL client rejects a private key file with open permissions.
podSpec.Volumes[kineConfig].Name = kineVolumeName
podSpec.Volumes[kineConfig].VolumeSource = corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Storage.KineMySQL.Certificate.SecretName,
SecretName: tcp.Status.Storage.Kine.Certificate.SecretName,
DefaultMode: pointer.Int32Ptr(420),
},
}
// Adding the volume for the certificates with fixed permissions
if index := int(kineCerts) + 1; len(podSpec.Volumes) < index {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
}
podSpec.Volumes[kineCerts].Name = kineVolumeCertName
podSpec.Volumes[kineCerts].VolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
}
}
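The volume slots above are addressed by iota-derived constants, and the slice is padded with zero values until the target index exists. A simplified, hypothetical sketch of that pattern (the real constant block also contains the kubeconfig entries listed at the top of this file):
```go
package main

import "fmt"

// Simplified stand-in for the real iota block, which also holds the
// kubeconfig-related constants preceding kineConfig in this file.
const (
	kineConfig = iota // slot for the secret-backed config volume
	kineCerts         // slot for the writable, emptyDir-backed certs volume
)

// ensureSlot grows the slice until index idx exists, mirroring the
// "if len(podSpec.Volumes) < index { append zero value }" checks
// in buildKineVolume.
func ensureSlot(volumes []string, idx int) []string {
	for len(volumes) < idx+1 {
		volumes = append(volumes, "")
	}
	return volumes
}

func main() {
	var vols []string
	vols = ensureSlot(vols, kineConfig)
	vols[kineConfig] = "kine-config"
	vols = ensureSlot(vols, kineCerts)
	vols[kineCerts] = "kine-certs"
	fmt.Println(vols) // [kine-config kine-certs]
}
```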
@@ -619,58 +640,95 @@ func (d *Deployment) buildKine(podSpec *corev1.PodSpec, tcp *kamajiv1alpha1.Tena
podSpec.Containers = containers
}
// In case of bare ETCD we exit without mangling the PodSpec resource.
if d.ETCDStorageType == types.ETCD {
return
}
if d.ETCDStorageType == types.KineMySQL {
if index := int(kineIndex) + 1; len(podSpec.Containers) < index {
podSpec.Containers = append(podSpec.Containers, corev1.Container{})
}
if index := int(kineIndex) + 1; len(podSpec.Containers) < index {
podSpec.Containers = append(podSpec.Containers, corev1.Container{})
}
args := map[string]string{}
args := map[string]string{}
if tcp.Spec.ControlPlane.Deployment.ExtraArgs != nil {
args = utilities.ArgsFromSliceToMap(tcp.Spec.ControlPlane.Deployment.ExtraArgs.Kine)
}
if tcp.Spec.ControlPlane.Deployment.ExtraArgs != nil {
args = utilities.ArgsFromSliceToMap(tcp.Spec.ControlPlane.Deployment.ExtraArgs.Kine)
}
args["--endpoint"] = "mysql://$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/$(MYSQL_SCHEMA)"
args["--ca-file"] = "/kine/ca.crt"
args["--cert-file"] = "/kine/server.crt"
args["--key-file"] = "/kine/server.key"
switch d.ETCDStorageType {
case types.KineMySQL:
args["--endpoint"] = "mysql://$(DB_USER):$(DB_PASSWORD)@tcp($(DB_HOST):$(DB_PORT))/$(DB_SCHEMA)"
case types.KinePostgreSQL:
args["--endpoint"] = "postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_SCHEMA)"
}
podSpec.Containers[kineIndex].Name = kineContainerName
podSpec.Containers[kineIndex].Image = d.KineContainerImage
podSpec.Containers[kineIndex].Command = []string{"/bin/kine"}
podSpec.Containers[kineIndex].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[kineIndex].VolumeMounts = []corev1.VolumeMount{
{
Name: kineVolumeName,
MountPath: "/kine",
ReadOnly: true,
args["--ca-file"] = "/certs/ca.crt"
args["--cert-file"] = "/certs/server.crt"
args["--key-file"] = "/certs/server.key"
podSpec.InitContainers = []corev1.Container{
{
Name: "chmod",
Image: d.KineContainerImage,
ImagePullPolicy: corev1.PullAlways,
TerminationMessagePath: corev1.TerminationMessagePathDefault,
TerminationMessagePolicy: corev1.TerminationMessageReadFile,
Command: []string{"sh"},
Args: []string{
"-c",
"cp /kine/*.* /certs && chmod -R 600 /certs/*.*",
},
}
podSpec.Containers[kineIndex].Env = []corev1.EnvVar{
{
Name: "GODEBUG",
Value: "x509ignoreCN=0",
},
}
podSpec.Containers[kineIndex].EnvFrom = []corev1.EnvFromSource{
{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: tcp.Status.Storage.KineMySQL.Config.SecretName,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: kineVolumeName,
ReadOnly: true,
MountPath: "/kine",
},
{
Name: kineVolumeCertName,
MountPath: "/certs",
ReadOnly: false,
},
},
}
podSpec.Containers[kineIndex].Ports = []corev1.ContainerPort{
{
ContainerPort: 2379,
Name: "server",
Protocol: corev1.ProtocolTCP,
},
}
podSpec.Containers[kineIndex].ImagePullPolicy = corev1.PullAlways
},
}
podSpec.Containers[kineIndex].Name = kineContainerName
podSpec.Containers[kineIndex].Image = d.KineContainerImage
podSpec.Containers[kineIndex].Command = []string{"/bin/kine"}
podSpec.Containers[kineIndex].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[kineIndex].VolumeMounts = []corev1.VolumeMount{
{
Name: kineVolumeCertName,
MountPath: "/certs",
ReadOnly: false,
},
}
podSpec.Containers[kineIndex].TerminationMessagePath = corev1.TerminationMessagePathDefault
podSpec.Containers[kineIndex].TerminationMessagePolicy = corev1.TerminationMessageReadFile
podSpec.Containers[kineIndex].Env = []corev1.EnvVar{
{
Name: "GODEBUG",
Value: "x509ignoreCN=0",
},
}
podSpec.Containers[kineIndex].EnvFrom = []corev1.EnvFromSource{
{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: tcp.Status.Storage.Kine.Config.SecretName,
},
},
},
}
podSpec.Containers[kineIndex].Ports = []corev1.ContainerPort{
{
ContainerPort: 2379,
Name: "server",
Protocol: corev1.ProtocolTCP,
},
}
podSpec.Containers[kineIndex].ImagePullPolicy = corev1.PullAlways
}
func (d *Deployment) SetSelector(deploymentSpec *appsv1.DeploymentSpec, tcp *kamajiv1alpha1.TenantControlPlane) {

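For reference, once the DB_* environment variables are expanded, the kine sidecar ends up launched with a command of roughly this shape; the host, credentials, and database name below are hypothetical placeholders, not values taken from this diff:
```console
/bin/kine \
  --endpoint "postgres://kine:password@postgres.kamaji-system.svc.cluster.local:5432/kine" \
  --ca-file /certs/ca.crt \
  --cert-file /certs/server.crt \
  --key-file /certs/server.key
```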

@@ -30,10 +30,8 @@ const (
defaultETCDClientSecretName = "root-client-certs"
defaultETCDClientSecretNamespace = "kamaji-system"
defaultTmpDirectory = "/tmp/kamaji"
defaultKineMySQLSecretName = "mysql-config"
defaultKineMySQLSecretNamespace = "kamaji-system"
defaultKineMySQLHost = "localhost"
defaultKineMySQLPort = 3306
defaultKineSecretName = "kine-secret"
defaultKineSecretNamespace = "kamaji-system"
defaultKineImage = "rancher/kine:v0.9.2-amd64"
)
@@ -46,7 +44,7 @@ func InitConfig() (*viper.Viper, error) {
flag.String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.Bool("leader-elect", false, "Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.String("etcd-storage-type", defaultETCDStorageType, "Type of storage for ETCD (i.e etcd, kine-mysql, kine-postgres)")
flag.String("etcd-storage-type", defaultETCDStorageType, "Type of storage for ETCD (i.e etcd, kine-mysql, kine-psql)")
flag.String("etcd-ca-secret-name", defaultETCDCASecretName, "Name of the secret which contains CA's certificate and private key.")
flag.String("etcd-ca-secret-namespace", defaultETCDCASecretNamespace, "Namespace of the secret which contains CA's certificate and private key.")
flag.String("etcd-client-secret-name", defaultETCDClientSecretName, "Name of the secret which contains ETCD client certificates")
@@ -54,10 +52,10 @@ func InitConfig() (*viper.Viper, error) {
flag.String("etcd-endpoints", defaultETCDEndpoints, "Comma-separated list with ETCD endpoints (i.e. https://etcd-0.etcd.kamaji-system.svc.cluster.local,https://etcd-1.etcd.kamaji-system.svc.cluster.local,https://etcd-2.etcd.kamaji-system.svc.cluster.local)")
flag.String("etcd-compaction-interval", defaultETCDCompactionInterval, "ETCD Compaction interval (i.e. \"5m0s\"). (default: \"0\" (disabled))")
flag.String("tmp-directory", defaultTmpDirectory, "Directory which will be used to work with temporary files.")
flag.String("kine-mysql-secret-name", defaultKineMySQLSecretName, "Name of the secret which contains MySQL (Kine) configuration.")
flag.String("kine-mysql-secret-namespace", defaultKineMySQLSecretNamespace, "Name of the namespace where the secret which contains MySQL (Kine) configuration.")
flag.String("kine-mysql-host", defaultKineMySQLHost, "Host where MySQL (Kine) is working")
flag.Int("kine-mysql-port", defaultKineMySQLPort, "Port where MySQL (Kine) is working")
flag.String("kine-secret-name", defaultKineSecretName, "Name of the secret which contains the Kine configuration.")
flag.String("kine-secret-namespace", defaultKineSecretNamespace, "Name of the namespace where the secret which contains the Kine configuration.")
flag.String("kine-host", "", "Host where the DB used by Kine is working.")
flag.Int("kine-port", 0, "Port where the DB used by Kine is listening to.")
flag.String("kine-image", defaultKineImage, "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies)")
// Setup zap configuration
@@ -110,16 +108,16 @@ func InitConfig() (*viper.Viper, error) {
if err := config.BindEnv("tmp-directory", fmt.Sprintf("%s_TMP_DIRECTORY", envPrefix)); err != nil {
return nil, err
}
if err := config.BindEnv("kine-mysql-secret-name", fmt.Sprintf("%s_KINE_MYSQL_SECRET_NAME", envPrefix)); err != nil {
if err := config.BindEnv("kine-secret-name", fmt.Sprintf("%s_KINE_SECRET_NAME", envPrefix)); err != nil {
return nil, err
}
if err := config.BindEnv("kine-mysql-secret-namespace", fmt.Sprintf("%s_KINE_MYSQL_SECRET_NAMESPACE", envPrefix)); err != nil {
if err := config.BindEnv("kine-secret-namespace", fmt.Sprintf("%s_KINE_SECRET_NAMESPACE", envPrefix)); err != nil {
return nil, err
}
if err := config.BindEnv("kine-mysql-host", fmt.Sprintf("%s_KINE_MYSQL_HOST", envPrefix)); err != nil {
if err := config.BindEnv("kine-host", fmt.Sprintf("%s_KINE_HOST", envPrefix)); err != nil {
return nil, err
}
if err := config.BindEnv("kine-mysql-port", fmt.Sprintf("%s_KINE_MYSQL_PORT", envPrefix)); err != nil {
if err := config.BindEnv("kine-port", fmt.Sprintf("%s_KINE_PORT", envPrefix)); err != nil {
return nil, err
}
if err := config.BindEnv("kine-image", fmt.Sprintf("%s_KINE_IMAGE", envPrefix)); err != nil {

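As a usage sketch, assuming the controller's `envPrefix` (defined outside this diff) is `KAMAJI`, the renamed bindings would be satisfied by environment variables such as:
```console
export KAMAJI_KINE_SECRET_NAME=kine-secret
export KAMAJI_KINE_SECRET_NAMESPACE=kamaji-system
export KAMAJI_KINE_HOST=postgres.kamaji-system.svc.cluster.local
export KAMAJI_KINE_PORT=5432
export KAMAJI_KINE_IMAGE=rancher/kine:v0.9.2-amd64
```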

@@ -311,6 +311,9 @@ func createKubeProxyAddon(client kubernetes.Interface) error {
NodeSelector: map[string]string{
"kubernetes.io/os": "linux",
},
Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists},
},
PriorityClassName: "system-node-critical",
RestartPolicy: corev1.RestartPolicyAlways,
SchedulerName: "default-scheduler",


@@ -30,7 +30,7 @@ type APIServerCertificate struct {
}
func (r *APIServerCertificate) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServer.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.APIServer.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *APIServerCertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,15 +75,14 @@ func (r *APIServerCertificate) GetName() string {
func (r *APIServerCertificate) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServer.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServer.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServer.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
latestConfigRV := getLatestConfigRV(*tenantControlPlane)
actualConfigRV := r.resource.GetLabels()["latest-config-rv"]
if latestConfigRV == actualConfigRV {
if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.APIServerCertName],
r.resource.Data[kubeadmconstants.APIServerKeyName],
@@ -122,10 +121,16 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
kubeadmconstants.APIServerKeyName: certificateKeyPair.PrivateKey,
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"latest-config-rv": latestConfigRV,
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},


@@ -30,7 +30,7 @@ type APIServerKubeletClientCertificate struct {
}
func (r *APIServerKubeletClientCertificate) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *APIServerKubeletClientCertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,15 +75,14 @@ func (r *APIServerKubeletClientCertificate) GetName() string {
func (r *APIServerKubeletClientCertificate) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServerKubeletClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
latestConfigRV := getLatestConfigRV(*tenantControlPlane)
actualConfigRV := r.resource.GetLabels()["latest-config-rv"]
if latestConfigRV == actualConfigRV {
if checksum := tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.APIServerKubeletClientCertName],
r.resource.Data[kubeadmconstants.APIServerKubeletClientKeyName],
@@ -125,12 +124,18 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"latest-config-rv": latestConfigRV,
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -30,7 +30,7 @@ type CACertificate struct {
func (r *CACertificate) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.CA.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Certificates.CA.ResourceVersion != r.resource.ResourceVersion
tenantControlPlane.Status.Certificates.CA.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *CACertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,22 +75,24 @@ func (r *CACertificate) GetName() string {
func (r *CACertificate) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.CA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.CA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.CA.ResourceVersion = r.resource.ResourceVersion
tenantControlPlane.Status.Certificates.CA.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.CACertName],
r.resource.Data[kubeadmconstants.CAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.CACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.CACertName],
r.resource.Data[kubeadmconstants.CAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.CACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
}
}
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
@@ -116,6 +118,13 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
},
))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}

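The certificate resources above all switch from resource-version labels to a content checksum stored in a `checksum` annotation and mirrored into the status. A self-contained sketch of that idea, with `checksumOf` as a hypothetical stand-in for `utilities.CalculateConfigMapChecksum` (whose exact hashing scheme is not shown in this diff):
```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"sort"
)

// checksumOf produces a deterministic digest of a secret's string data by
// hashing keys and values in sorted key order. Hypothetical stand-in for
// utilities.CalculateConfigMapChecksum.
func checksumOf(data map[string]string) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	h := md5.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(data[k]))
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	data := map[string]string{"tls.crt": "PEM...", "tls.key": "PEM..."}

	// mutate() stores the digest in the "checksum" annotation, and
	// UpdateTenantControlPlaneStatus mirrors it into the status.
	annotation := checksumOf(data)
	statusChecksum := annotation

	// ShouldStatusBeUpdated then reduces to a string comparison: only a
	// content change, not a metadata-only write, re-triggers the update.
	fmt.Println("needs update:", statusChecksum != annotation)
}
```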

@@ -6,7 +6,6 @@ package resources
import (
"context"
"fmt"
"reflect"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
@@ -36,7 +35,7 @@ func (r *ETCDCACertificatesResource) ShouldStatusBeUpdated(ctx context.Context,
return true
}
return tenantControlPlane.Status.Certificates.ETCD.CA.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.ETCD.CA.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *ETCDCACertificatesResource) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -73,6 +72,7 @@ func (r *ETCDCACertificatesResource) UpdateTenantControlPlaneStatus(ctx context.
tenantControlPlane.Status.Certificates.ETCD.CA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.ETCD.CA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.ETCD.CA.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
@@ -83,7 +83,16 @@ func (r *ETCDCACertificatesResource) getPrefixedName(tenantControlPlane *kamajiv
func (r *ETCDCACertificatesResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
r.resource.SetLabels(utilities.KamajiLabels())
if etcdStatus := tenantControlPlane.Status.Certificates.ETCD; etcdStatus != nil && len(etcdStatus.CA.Checksum) > 0 && etcdStatus.CA.Checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := etcd.IsETCDCertificateAndKeyPairValid(r.resource.Data[kubeadmconstants.CACertName], r.resource.Data[kubeadmconstants.CAKeyName])
if err != nil {
r.Log.Info(fmt.Sprintf("etcd certificates are not valid: %s", err.Error()))
}
if isValid {
return nil
}
}
etcdCASecretNamespacedName := k8stypes.NamespacedName{Namespace: r.ETCDCASecretNamespace, Name: r.ETCDCASecretName}
etcdCASecret := &corev1.Secret{}
@@ -91,29 +100,20 @@ func (r *ETCDCACertificatesResource) mutate(ctx context.Context, tenantControlPl
return err
}
isValid, err := etcd.IsETCDCertificateAndKeyPairValid(r.resource.Data[kubeadmconstants.CACertName], r.resource.Data[kubeadmconstants.CAKeyName])
if err != nil {
r.Log.Info(fmt.Sprintf("etcd certificates are not valid: %s", err.Error()))
}
if reflect.DeepEqual(etcdCASecret.Data[kubeadmconstants.CACertName], r.resource.Data[kubeadmconstants.CACertName]) &&
reflect.DeepEqual(etcdCASecret.Data[kubeadmconstants.CAKeyName], r.resource.Data[kubeadmconstants.CAKeyName]) {
if isValid {
return nil
}
return fmt.Errorf("CA certificates provided into secrets %s/%s are not valid", r.ETCDCASecretNamespace, r.ETCDCASecretName)
}
r.resource.Data = map[string][]byte{
kubeadmconstants.CACertName: etcdCASecret.Data[kubeadmconstants.CACertName],
kubeadmconstants.CAKeyName: etcdCASecret.Data[kubeadmconstants.CAKeyName],
}
if err = ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
return err
}
r.resource.SetLabels(utilities.KamajiLabels())
return nil
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -33,7 +33,7 @@ func (r *ETCDCertificatesResource) ShouldStatusBeUpdated(ctx context.Context, te
return true
}
return tenantControlPlane.Status.Certificates.ETCD.APIServer.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.ETCD.APIServer.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *ETCDCertificatesResource) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -70,6 +70,7 @@ func (r *ETCDCertificatesResource) UpdateTenantControlPlaneStatus(ctx context.Co
tenantControlPlane.Status.Certificates.ETCD.APIServer.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.ETCD.APIServer.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.ETCD.APIServer.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
@@ -80,27 +81,27 @@ func (r *ETCDCertificatesResource) getPrefixedName(tenantControlPlane *kamajiv1a
func (r *ETCDCertificatesResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
r.resource.SetLabels(utilities.KamajiLabels())
if tenantControlPlane.Status.Certificates.ETCD == nil {
return fmt.Errorf("etcd is still synchronizing latest changes")
}
if checksum := tenantControlPlane.Status.Certificates.ETCD.APIServer.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := etcd.IsETCDCertificateAndKeyPairValid(r.resource.Data[kubeadmconstants.APIServerEtcdClientCertName], r.resource.Data[kubeadmconstants.APIServerEtcdClientKeyName])
if err != nil {
r.Log.Info(fmt.Sprintf("etcd certificates are not valid: %s", err.Error()))
}
if isValid {
return nil
}
}
etcdCASecretNamespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.ETCD.CA.SecretName}
etcdCASecret := &corev1.Secret{}
if err := r.Client.Get(ctx, etcdCASecretNamespacedName, etcdCASecret); err != nil {
return err
}
isValid, err := etcd.IsETCDCertificateAndKeyPairValid(r.resource.Data[kubeadmconstants.APIServerEtcdClientCertName], r.resource.Data[kubeadmconstants.APIServerEtcdClientKeyName])
if err != nil {
r.Log.Info(fmt.Sprintf("etcd certificates are not valid: %s", err.Error()))
}
if isValid {
return nil
}
cert, privKey, err := etcd.GetETCDCACertificateAndKeyPair(
tenantControlPlane.GetName(),
etcdCASecret.Data[kubeadmconstants.CACertName],
@@ -115,10 +116,15 @@ func (r *ETCDCertificatesResource) mutate(ctx context.Context, tenantControlPlan
kubeadmconstants.APIServerEtcdClientKeyName: privKey.Bytes(),
}
if err = ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
return err
}
r.resource.SetLabels(utilities.KamajiLabels())
return nil
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -30,7 +30,7 @@ type FrontProxyClientCertificate struct {
}
func (r *FrontProxyClientCertificate) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyClient.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *FrontProxyClientCertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,15 +75,14 @@ func (r *FrontProxyClientCertificate) GetName() string {
func (r *FrontProxyClientCertificate) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
latestConfigRV := getLatestConfigRV(*tenantControlPlane)
actualConfigRV := r.resource.GetLabels()["latest-config-rv"]
if latestConfigRV == actualConfigRV {
if checksum := tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.FrontProxyClientCertName],
r.resource.Data[kubeadmconstants.FrontProxyClientKeyName],
@@ -125,12 +124,18 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"latest-config-rv": latestConfigRV,
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -29,7 +29,7 @@ type FrontProxyCACertificate struct {
}
func (r *FrontProxyCACertificate) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *FrontProxyCACertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -76,21 +76,24 @@ func (r *FrontProxyCACertificate) GetName() string {
func (r *FrontProxyCACertificate) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyCA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.FrontProxyCACertName],
r.resource.Data[kubeadmconstants.FrontProxyCAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyCACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
if checksum := tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
r.resource.Data[kubeadmconstants.FrontProxyCACertName],
r.resource.Data[kubeadmconstants.FrontProxyCAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyCACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
}
}
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
@@ -116,6 +119,13 @@ func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane
},
))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -30,7 +30,7 @@ func (r *KubernetesIngressResource) ShouldStatusBeUpdated(ctx context.Context, t
}
func (r *KubernetesIngressResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return !tenantControlPlane.Spec.ControlPlane.Ingress.Enabled
return tenantControlPlane.Spec.ControlPlane.Ingress == nil
}
func (r *KubernetesIngressResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
@@ -46,7 +46,7 @@ func (r *KubernetesIngressResource) CleanUp(ctx context.Context, tenantControlPl
}
func (r *KubernetesIngressResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if tenantControlPlane.Spec.ControlPlane.Ingress.Enabled {
if tenantControlPlane.Spec.ControlPlane.Ingress != nil {
tenantControlPlane.Status.Kubernetes.Ingress.IngressStatus = r.resource.Status
tenantControlPlane.Status.Kubernetes.Ingress.Name = r.resource.GetName()
tenantControlPlane.Status.Kubernetes.Ingress.Namespace = r.resource.GetNamespace()
@@ -54,7 +54,7 @@ func (r *KubernetesIngressResource) UpdateTenantControlPlaneStatus(ctx context.C
return nil
}
tenantControlPlane.Status.Kubernetes.Ingress = kamajiv1alpha1.KubernetesIngressStatus{}
tenantControlPlane.Status.Kubernetes.Ingress = &kamajiv1alpha1.KubernetesIngressStatus{}
return nil
}


@@ -32,9 +32,7 @@ type Agent struct {
}
func (r *Agent) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Agent.Name != r.resource.GetName() ||
tenantControlPlane.Status.Addons.Konnectivity.Agent.Namespace != r.resource.GetNamespace() ||
tenantControlPlane.Status.Addons.Konnectivity.Agent.RV != r.resource.ObjectMeta.ResourceVersion
return tenantControlPlane.Status.Addons.Konnectivity.Agent.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *Agent) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -84,7 +82,7 @@ func (r *Agent) UpdateTenantControlPlaneStatus(ctx context.Context, tenantContro
tenantControlPlane.Status.Addons.Konnectivity.Agent = kamajiv1alpha1.ExternalKubernetesObjectStatus{
Name: r.resource.GetName(),
Namespace: r.resource.GetNamespace(),
RV: r.resource.ObjectMeta.ResourceVersion,
Checksum: r.resource.GetAnnotations()["checksum"],
LastUpdate: metav1.Now(),
}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
@@ -195,6 +193,18 @@ func (r *Agent) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.T
SuccessThreshold: 1,
FailureThreshold: 3,
}
// Creating a copy to remove the metadata that would be changed at every reconciliation
c := r.resource.DeepCopy()
c.SetAnnotations(nil)
c.SetResourceVersion("")
yaml, _ := utilities.EncondeToYaml(c)
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.MD5Checksum(yaml)
r.resource.SetAnnotations(annotations)
return nil
}

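The Konnectivity agent takes a different route: it checksums the YAML encoding of the whole object after clearing the metadata that changes on every write. A hedged sketch of the same sequence using `sigs.k8s.io/yaml`, with a DaemonSet standing in for the agent's actual resource type (`utilities.EncondeToYaml` and `utilities.MD5Checksum` are not shown in this diff):
```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

// objectChecksum digests the YAML encoding of an object after clearing the
// metadata that the API server rewrites on every update, mirroring the
// DeepCopy / SetAnnotations(nil) / SetResourceVersion("") sequence above.
func objectChecksum(ds *appsv1.DaemonSet) (string, error) {
	c := ds.DeepCopy()
	c.SetAnnotations(nil)    // the checksum annotation itself must not feed the hash
	c.SetResourceVersion("") // bumped on every write, even without spec changes
	raw, err := yaml.Marshal(c)
	if err != nil {
		return "", err
	}
	sum := md5.Sum(raw)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "konnectivity-agent"}}
	sum, err := objectChecksum(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}
```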

@@ -37,8 +37,7 @@ type CertificateResource struct {
}
func (r *CertificateResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Certificate.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Addons.Konnectivity.Certificate.ResourceVersion != r.resource.ResourceVersion
return tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *CertificateResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -84,7 +83,7 @@ func (r *CertificateResource) UpdateTenantControlPlaneStatus(ctx context.Context
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Certificate.LastUpdate = metav1.Now()
tenantControlPlane.Status.Addons.Konnectivity.Certificate.SecretName = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.Certificate.ResourceVersion = r.resource.ResourceVersion
tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
return nil
@@ -98,9 +97,7 @@ func (r *CertificateResource) UpdateTenantControlPlaneStatus(ctx context.Context
func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
latestCARV := tenantControlPlane.Status.Certificates.CA.ResourceVersion
actualCARV := r.resource.GetLabels()["latest-ca-rv"]
if latestCARV == actualCARV {
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == utilities.CalculateConfigMapChecksum(r.resource.StringData) {
isValid, err := isCertificateAndKeyPairValid(
r.resource.Data[corev1.TLSCertKey],
r.resource.Data[corev1.TLSPrivateKeyKey],
@@ -134,15 +131,22 @@ func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *ka
corev1.TLSCertKey: cert.Bytes(),
corev1.TLSPrivateKeyKey: privKey.Bytes(),
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"latest-ca-rv": latestCARV,
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -25,7 +25,7 @@ type ClusterRoleBindingResource struct {
func (r *ClusterRoleBindingResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.Name != r.resource.GetName() ||
tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.RV != r.resource.ObjectMeta.ResourceVersion
tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.Checksum != r.resource.ObjectMeta.GetAnnotations()["checksum"]
}
func (r *ClusterRoleBindingResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -73,8 +73,8 @@ func (r *ClusterRoleBindingResource) UpdateTenantControlPlaneStatus(ctx context.
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding = kamajiv1alpha1.ExternalKubernetesObjectStatus{
Name: r.resource.GetName(),
RV: r.resource.ObjectMeta.ResourceVersion,
Name: r.resource.GetName(),
Checksum: r.resource.GetAnnotations()["checksum"],
}
return nil
@@ -110,6 +110,14 @@ func (r *ClusterRoleBindingResource) mutate(ctx context.Context, tenantControlPl
},
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
yaml, _ := utilities.EncondeToYaml(r.resource)
annotations["checksum"] = utilities.MD5Checksum(yaml)
// GetAnnotations can return nil on a fresh object, so attach the map back explicitly
r.resource.SetAnnotations(annotations)
return nil
}
}


@@ -303,7 +303,7 @@ func (r *KubernetesDeploymentResource) syncVolumes(tenantControlPlane *kamajiv1a
r.resource.Spec.Template.Spec.Volumes[index].VolumeSource = corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: tenantControlPlane.Status.Addons.Konnectivity.EgressSelectorConfiguration,
Name: tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Name,
},
DefaultMode: pointer.Int32Ptr(420),
},


@@ -59,19 +59,20 @@ func (r *EgressSelectorConfigurationResource) GetName() string {
}
func (r *EgressSelectorConfigurationResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.EgressSelectorConfiguration != r.resource.GetName()
return tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *EgressSelectorConfigurationResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
tenantControlPlane.Status.Addons.Konnectivity.EgressSelectorConfiguration = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Name = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum = r.resource.GetAnnotations()["checksum"]
return nil
}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
tenantControlPlane.Status.Addons.Konnectivity.EgressSelectorConfiguration = ""
tenantControlPlane.Status.Addons.Konnectivity.Enabled = false
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap = kamajiv1alpha1.KonnectivityConfigMap{}
return nil
}
@@ -109,6 +110,12 @@ func (r *EgressSelectorConfigurationResource) mutate(ctx context.Context, tenant
"egress-selector-configuration.yaml": string(yamlConfiguration),
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.MD5Checksum(yamlConfiguration)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}


@@ -28,7 +28,7 @@ type KubeconfigResource struct {
}
func (r *KubeconfigResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.SecretName != r.resource.GetName()
return tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *KubeconfigResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,6 +74,7 @@ func (r *KubeconfigResource) UpdateTenantControlPlaneStatus(ctx context.Context,
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.LastUpdate = metav1.Now()
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.SecretName = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
return nil
@@ -87,9 +88,7 @@ func (r *KubeconfigResource) UpdateTenantControlPlaneStatus(ctx context.Context,
func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
latestCARV := tenantControlPlane.Status.Addons.Konnectivity.Certificate.ResourceVersion
actualCARV := r.resource.GetLabels()["latest-certificate-rv"]
if latestCARV == actualCARV {
if checksum := tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
return nil
}
@@ -151,10 +150,14 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
konnectivityKubeconfigFileName: kubeconfigBytes,
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"latest-certificate-rv": latestCARV,
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},


@@ -24,9 +24,7 @@ type ServiceAccountResource struct {
}
func (r *ServiceAccountResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.ServiceAccount.Name != r.resource.GetName() ||
tenantControlPlane.Status.Addons.Konnectivity.ServiceAccount.Namespace != r.resource.GetNamespace() ||
tenantControlPlane.Status.Addons.Konnectivity.ServiceAccount.RV != r.resource.ObjectMeta.ResourceVersion
return tenantControlPlane.Status.Addons.Konnectivity.ServiceAccount.Checksum != r.resource.GetAnnotations()["checksum"]
}
func (r *ServiceAccountResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -76,7 +74,7 @@ func (r *ServiceAccountResource) UpdateTenantControlPlaneStatus(ctx context.Cont
tenantControlPlane.Status.Addons.Konnectivity.ServiceAccount = kamajiv1alpha1.ExternalKubernetesObjectStatus{
Name: r.resource.GetName(),
Namespace: r.resource.GetNamespace(),
RV: r.resource.ObjectMeta.ResourceVersion,
Checksum: r.resource.GetAnnotations()["checksum"],
}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
@@ -99,6 +97,19 @@ func (r *ServiceAccountResource) mutate(ctx context.Context, tenantControlPlane
},
))
c := r.resource.DeepCopy()
c.SetAnnotations(nil)
c.SetResourceVersion("")
yaml, _ := utilities.EncondeToYaml(c)
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.MD5Checksum(yaml)
r.resource.SetAnnotations(annotations)
return nil
}
}

Some files were not shown because too many files have changed in this diff.