Compare commits
161 Commits
Commit SHAs (author, date, and message columns are empty):

a2fd50ed15, 142bb670c1, 30c49bc982, 540e0c2bc2, a849a84fd0, bbfec75e7f, ced34a50e6, 1311220b94,
0e57b32ebc, 4753c8ac8d, b99639c9fa, f3d95add5b, dc3d5060ca, 06a55f6a70, 7a160cdb74, 9688d288b7,
87c7c984de, e5cccfe88b, 197518b0b4, 7ac8e5e539, cec4f9136d, 4299b72d7f, eff68db336, 74a6eb6b80,
21fe27935f, e3a8ff90da, 8e6cea2d2d, 1c90a4f333, 6123d9a5a4, 587d3bb24e, 4465bd8449, cf1f2763f6,
25dc19f839, 1ccc1d1b1e, 1d96710890, edceda3302, 755cc5bacd, e0c86d685c, ddb700f4f0, 4bdddfc695,
8b999f1323, 2571086ff3, cd9d92296b, f24ff618a9, 4bf39149ec, 045c5bbd7c, 6eb3171817, 289bad540c,
ac06447706, 95de31d697, 0037b4941c, c251f57f06, 129cb0e6fe, 73e0618ad3, 6c2634b5e9, dac670113f,
c8039cdf5c, a7cfc9a898, 0f1a4f28de, 40f57466e2, feed6634a5, c85e686283, 05ffd6cf75, e16855a1b4,
d21eb135fd, c5e12cc401, dc97d69d0c, bac5d56076, 973392bd85, 30b36ba7f4, 0db27a7335, facf23a055,
6674373037, 72712693a2, 33709005b1, 6ce83c551e, 2b638fe09d, 58a5cac9e8, e9d2af931a, a996803db5,
e34fc1851f, 740fe9c938, 65854721de, adde828e03, ffc2c7c967, 0f195286a7, f768f93fe9, 05cbff1fd8,
7e94ecdbab, 648da19687, 6c4b339c4b, eee62032de, 8e8ee92fb2, f3be9e5442, fb296267f6, 751ce3722b,
d99ffb0334, f831f385c4, f301c9bdc2, 0909529e6b, d0aacd03f6, f4c84946c0, 2c72369b99, abcc662c96,
792119d2d3, f0e675dea3, 4413061640, 8f57ff407e, 94f2d9074d, fadcc219ec, af5ac4acab, 069afd9b17,
6741194034, 7acba20056, 0d2cf784f5, 4db8230912, 84c8b1a135, 7cf930cbe9, d5e146ef8f, cb5fb00d7b,
ed00b934ec, dbaf3d1915, a625f2218c, 617e802d02, eca04893a8, 14c96b034a, f53271cb87, 8007fe8cd2,
11d8262c74, 877314f53d, 27480ba66a, d3d18ef836, c81d190719, 9284a43860, 6cab15551f, f0fb8b3c11,
778a34a382, 25b1c7a8fa, 2c6360ad82, 523f1cf0e3, 4d6d1461cc, 49e016d4da, b7a2d9da8c, 39c7591457,
327438e236, ba4b3eec8f, d06affc216, 236540d89f, a5b7605e27, 3821cf1d67, be1737d908, b5a7ff6e6c,
9f937a1eec, 0ae3659949, 736fbf0505, 8dc0672718, 27f598fbfc, d3603c7187, 83797fc0b3, 517a4a3458,
649cf0c852
.github/workflows/ci.yaml — 6 changed lines

@@ -14,12 +14,12 @@ jobs:
   - uses: actions/checkout@v2
   - uses: actions/setup-go@v3
     with:
-      go-version: '1.19'
+      go-version: '1.22'
       check-latest: true
   - name: Run golangci-lint
     uses: golangci/golangci-lint-action@v3.2.0
     with:
-      version: v1.49.0
+      version: v1.54.2
       only-new-issues: false
       args: --timeout 5m --config .golangci.yml
 diff:
@@ -31,7 +31,7 @@ jobs:
       fetch-depth: 0
   - uses: actions/setup-go@v3
     with:
-      go-version: '1.19'
+      go-version: '1.22'
       check-latest: true
   - run: make yaml-installation-file
   - name: Checking if YAML installer file is not aligned

.github/workflows/docker-ci.yml — 12 changed lines

@@ -12,12 +12,14 @@ jobs:
   - name: Checkout
     uses: actions/checkout@v2
     with:
       fetch-depth: 0

   - name: Generate build-args
     id: build-args
     run: |
       # Declare vars for internal use
-      VERSION=$(git describe --abbrev=0 --tags)
+      VERSION=$(make get_version)
       GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
       GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
       GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
@@ -85,7 +87,13 @@ jobs:
       platforms: linux/amd64,linux/arm64,linux/arm
       push: true
       tags: ${{ steps.meta.outputs.tags }}
-      build-args:
+      build-args: |
+        GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
+        GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
+        GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
+        GIT_MODIFIED=${{ env.GIT_MODIFIED }}
+        GIT_REPO=${{ env.GIT_REPO }}
+        BUILD_DATE=${{ env.BUILD_DATE }}

   - name: Image digest
     run: echo ${{ steps.build-release.outputs.digest }}

.github/workflows/e2e.yaml — 4 changed lines

@@ -13,6 +13,7 @@ on:
       - 'main.go'
       - 'Makefile'
       - 'internal/**'
+      - 'cmd/**'
   pull_request:
     branches: [ "*" ]
     paths:
@@ -25,6 +26,7 @@ on:
       - 'main.go'
       - 'Makefile'
       - 'internal/**'
+      - 'cmd/**'

 jobs:
   kind:
@@ -36,7 +38,7 @@ jobs:
       fetch-depth: 0
   - uses: actions/setup-go@v3
     with:
-      go-version: '1.19'
+      go-version: '1.22'
       check-latest: true
   - run: |
       sudo apt-get update

.gitignore — 3 added lines

@@ -24,6 +24,9 @@ bin
 *~
 .vscode

 # Tilt files.
 .tiltbuild

+**/*.kubeconfig
+**/*.crt
+**/*.key

.golangci.yml

@@ -11,6 +11,7 @@ linters-settings:

 linters:
   disable:
+    - depguard
     - wrapcheck
     - gomnd
     - scopelint

ADOPTERS.md — new file, 22 lines

# Adopters

This is a list of companies that have adopted Kamaji.
Feel free to open a Pull-Request to get yours listed.

### Adopter list (alphabetically)

| Type | Name | Since | Website | Use-Case |
|:-|:-|:-|:-|:-|
| End-user | KINX | 2024 | [link](https://kinx.net/?lang=en) | KINX is an Internet infrastructure service provider and will use kamaji for its new [Managed Kubernetes Service](https://kinx.net/service/cloud/kubernetes/intro/?lang=en). |
| End-user | sevensphere | 2023 | [link](https://www.sevensphere.io) | Sevensphere provides consulting services for end-user companies / cloud providers and uses Kamaji for designing cloud/on-premises Kubernetes-as-a-Service platform. |
| Vendor | Ænix | 2023 | [link](https://aenix.io/) | Ænix provides consulting services for cloud providers and uses Kamaji for running Kubernetes-as-a-Service in free PaaS platform [Cozystack](https://cozystack.io). |
| Vendor | Netsons | 2023 | [link](https://www.netsons.com) | Netsons is an Italian hosting and cloud provider and uses Kamaji in its [Managed Kubernetes](https://www.netsons.com/kubernetes) offering. |
| Vendor | Aknostic | 2023 | [link](https://aknostic.com) | Aknostic is a cloud-native consultancy company using Kamaji to build a Kubernetes based PaaS. |

### Adopter Types

**End-user**: The organization runs Kamaji in production in some way.

**Integration**: The organization has a product that integrates with Kamaji, but does not contain Kamaji.

**Vendor**: The organization packages Kamaji in their product and sells it as part of their product.

Dockerfile

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.19 as builder
+FROM golang:1.22 as builder

 WORKDIR /workspace
 # Copy the Go Modules manifests

Makefile — 12 changed lines

@@ -3,7 +3,7 @@
 # To re-generate a bundle for another specific version without changing the standard setup, you can:
 # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
 # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
-VERSION ?= 0.2.2
+VERSION ?= 0.5.1

 # CHANNELS define the bundle channels used in the bundle.
 # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -85,11 +85,11 @@ kind: ## Download kind locally if necessary.

 CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-    $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2)
+    $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0)

 GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
 golangci-lint: ## Download golangci-lint locally if necessary.
-    $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
+    $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2)

 KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
@@ -154,6 +154,10 @@ GIT_MODIFIED ?= $$(echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
 GIT_REPO ?= $$(git config --get remote.origin.url)
 BUILD_DATE ?= $$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)

+.PHONY: get_version
+get_version:
+    @echo -n v$(VERSION)
+
 build: generate fmt vet ## Build manager binary.
     go build -o bin/manager main.go

@@ -165,7 +169,7 @@ docker-build: ## Build docker image with the manager.
     --build-arg GIT_TAG_COMMIT=$(GIT_TAG_COMMIT) \
     --build-arg GIT_MODIFIED=$(GIT_MODIFIED) \
     --build-arg GIT_REPO=$(GIT_REPO) \
-    --build-arg GIT_LAST_TAG=$(VERSION) \
+    --build-arg GIT_LAST_TAG=v$(VERSION) \
     --build-arg BUILD_DATE=$(BUILD_DATE)

 docker-push: ## Push docker image with the manager.

PROJECT — 8 removed lines

@@ -16,10 +16,6 @@ resources:
     kind: TenantControlPlane
     path: github.com/clastix/kamaji/api/v1alpha1
     version: v1alpha1
-    webhooks:
-      defaulting: true
-      validation: true
-      webhookVersion: v1
 - api:
     crdVersion: v1
     domain: clastix.io
@@ -27,8 +23,4 @@
     kind: DataStore
     path: github.com/clastix/kamaji/api/v1alpha1
     version: v1alpha1
-    webhooks:
-      defaulting: true
-      validation: true
-      webhookVersion: v1
 version: "3"
README.md — 178 changed lines

@@ -3,64 +3,158 @@
<p align="left">
  <img src="https://img.shields.io/github/license/clastix/kamaji"/>
  <img src="https://img.shields.io/github/go-mod/go-version/clastix/kamaji"/>
  <a href="https://github.com/clastix/kamaji/releases">
    <img src="https://img.shields.io/github/v/release/clastix/kamaji"/>
  </a>
  <a href="https://github.com/clastix/kamaji/releases"><img src="https://img.shields.io/github/v/release/clastix/kamaji"/></a>
  <img src="https://goreportcard.com/badge/github.com/clastix/kamaji">
  <a href="https://kubernetes.slack.com/archives/C03GLTTMWNN"><img alt="#kamaji on Kubernetes Slack" src="https://img.shields.io/badge/slack-@kubernetes/kamaji-blue.svg?logo=slack"/></a>
</p>

**Kamaji** deploys and operates **Kubernetes** at scale with a fraction of the operational burden.

<p align="center" style="padding: 6px 6px">
  <img src="assets/kamaji-logo.png" />
</p>

### 🤔 What is Kamaji?

## Why we are building it?
Global hyper-scalers are leading the Managed Kubernetes space, while other cloud providers, as well as large corporations, are struggling to offer the same experience to their DevOps teams because of the lack of the right tools. Also, current Kubernetes solutions are mainly designed with an enterprise-first approach and they are too costly when deployed at scale.
**Kamaji** is a **Kubernetes Control Plane Manager** leveraging the concept of [**Hosted Control Plane**](https://clastix.io/post/the-raise-of-hosted-control-plane-in-kubernetes/).

**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
Kamaji's approach is based on running the Kubernetes Control Plane components in Pods instead of dedicated machines.
This allows operating Kubernetes clusters at scale, with a fraction of the operational burden.
Thanks to this approach, running multiple Control Planes can be cheaper and easier to deploy and operate.

## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Planes of _“tenant clusters”_ are just regular pods instead of dedicated Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate.
_Kamaji is like a fleet of Site Reliability Engineers with expertise codified into its logic, working 24/7 to keep your Control Planes up and running._

<img src="docs/content/images/architecture.png" width="600" style="display: block; margin: 0 auto">

## Getting started
### 📖 How it works

Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-started/) to deploy a minimal setup of Kamaji on KinD.
Kamaji extends the Kubernetes API thanks to [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions).

## Features
By installing Kamaji, two new APIs become available:

- **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping each tenant isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full APIs compliant:** tenant clusters are fully CNCF compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji provisioned cluster and a dedicated cluster.
- `TenantControlPlane`, the instance definition of your desired Kubernetes Control Plane
- `Datastore`, the backing store used by one (or more) `TenantControlPlane`

## Roadmap
The `TenantControlPlane` (short-named `tcp`) objects are Namespace-scoped and allow configuring every aspect of your desired Control Plane.
Besides the Kubernetes configuration values, you can specify Pod options such as limits, requests, tolerations, node selector, etc.,
as well as how these should be exposed (e.g. using a `ClusterIP`, a `LoadBalancer`, or a `NodePort`).

- [x] Benchmarking
- [ ] Stress-test
- [x] Support for dynamic address allocation on native Load Balancer
The `TenantControlPlane` is the stateless definition of the Control Plane, allowing to set up the required components for a full-fledged Kubernetes cluster.
The state is managed by the `Datastore` API, a cluster-scoped resource which can hold the data of one or more Kubernetes clusters.

> For further information about the API specifications and all the available options,
> refer to the official [API reference](https://kamaji.clastix.io/reference/api/#tenantcontrolplane).
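To make the declarative flow concrete, here is a minimal, hedged sketch (not part of this changeset) that creates a `TenantControlPlane` from Go with a controller-runtime client. Only the `DataStore` and Kubernetes version fields referenced elsewhere in this compare view are set; the object name, namespace, and version are made up, and every other field is left to the defaulting webhook and CRD defaults — consult the API reference for the authoritative schema.

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = kamajiv1alpha1.AddToScheme(scheme) // registers the kamaji.clastix.io/v1alpha1 types

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// A namespace-scoped Tenant Control Plane: the mutating webhook fills in
	// the default DataStore when the field is left empty.
	tcp := &kamajiv1alpha1.TenantControlPlane{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant-00", Namespace: "default"}, // illustrative names
	}
	tcp.Spec.DataStore = "default"          // name of a cluster-scoped DataStore (assumed to exist)
	tcp.Spec.Kubernetes.Version = "v1.27.0" // must not exceed the version supported by Kamaji's kubeadm

	if err := c.Create(context.Background(), tcp); err != nil {
		panic(err)
	}
}
```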
### ⭐️ Main features

- **Fast provisioning time**: depending on the infrastructure, Tenant Control Planes are up and ready to serve traffic in **16 seconds**.
- **Streamlined update**: the rollout to a new Kubernetes version for a given Tenant Control Plane takes just **10 seconds**, with a Blue/Green deployment to avoid serving mixed Kubernetes versions.
- **Resource optimization**: thanks to the Datastore decoupling, there's no need for an odd number of instances (e.g. RAFT consensus), saving up to 60% of HW resources.
- **Scale from zero to the moon**: scale down the instance when there's no usage, or automatically scale to support traffic spikes reusing the Kubernetes patterns.
- **Declarative approach, constant reconciliation**: thanks to the Operator pattern, drift detection happens in real-time, maintaining the desired state.
- **Automated certificates management**: Kamaji leverages `kubeadm`, and the certificates are automatically created and rotated for you.
- **Managing core addons**: Kamaji automatically configures `kube-proxy`, `CoreDNS`, and `konnectivity`, with automatic remediation in case of user errors (e.g. deleting the `CoreDNS` deployment).
- **Auto Healing**: the `TenantControlPlane` objects in the management cluster are tracked by Kamaji; if they are deleted, everything is recreated in an idempotent way.
- **Datastore multi-tenancy**: optionally, Kamaji allows running multiple Control Planes on the same _Datastore_ instance leveraging the multi-tenancy of each driver, decreasing operations and optimizing costs.
- **Overcoming `etcd` limitations**: optionally, Kamaji allows using a different _Datastore_ thanks to [`kine`](https://github.com/k3s-io/kine), supporting `MySQL` or `PostgreSQL` as an alternative.
- **Simplifying mixed-networks setup**: thanks to [`Konnectivity`](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/),
  the Tenant Control Plane can reach worker nodes hosted in a different network, even without NAT and with nodes using a non-routable IP address
  (e.g. worker nodes in a different infrastructure).

### 🚀 Use cases

- [**Creating a private Managed Kubernetes Service**](https://clastix.io/post/netsons-builds-a-managed-kubernetes-service-with-kamaji-and-open-stack/)
- [**Building a Platform as a Service**](https://aenix.io/cozystack/)
- [**Overcoming public Managed Kubernetes Services**](https://clastix.io/post/overcoming-eks-limitations-with-kamaji-on-aws/) such as EKS
- [**Hybrid infrastructures**](https://clastix.io/post/bridging-the-gap-hybrid-kubernetes-clusters-with-remote-control-planes/):
  host the Control Plane on the Cloud and worker nodes on prem, or vice-versa, according to your needs.
- [**Kubernetes at the edge**](https://clastix.io/post/edgevolution-unleashing-the-power-of-kubernetes-clusters-for-a-revolutionary-edge-computing-experience/):
  take full advantage of the _Kubernetes API Server as a service_ paradigm.
- **Kubernetes Control Plane as a Service:** centrally manage multiple Kubernetes clusters from a single management point (_Multi-Cluster management_).
- **High-density Control Plane:** place multiple control planes on the same infrastructure, instead of having dedicated machines for each control plane.
- **Strong Multi-tenancy:** leave users to access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes with automation, high-availability, fault tolerance, and autoscaling out of the box.
- **Bring Your Own Device:** keep the control plane isolated from the data plane. Worker nodes can join and run consistently from everywhere: cloud, edge, and data-center.
- **Full CNCF compliant:** all clusters are built with upstream Kubernetes binaries, resulting in fully CNCF compliant Kubernetes clusters.

> 🤔 You'd like to do the same but don't know how?
> 💡 [CLASTIX](https://clastix.io/) can help you with your needs!

### 🧑💻 Production grade

Kamaji is empowering several businesses, and it counts public adopters.
Check out the [adopters](./ADOPTERS.md) file to learn more.

> 🤗 If you're using Kamaji, share your love by opening a PR!

### 🍦 Vanilla Kubernetes clusters

Kamaji is **not** yet-another-Kubernetes distribution: you have full freedom on the technology stack to provide to end users.
Kamaji is a perfect fit for Platform Engineering, hiding the complexity of Control Plane management from developers and DevOps engineers.

The provided Kubernetes Control Planes are [CNCF compliant clusters](https://kamaji.clastix.io/reference/conformance/).

<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" style="display: block; width: 75px; margin: 0 auto">

### 🐢 Cluster API support

Kamaji is **not** a [Cluster API](https://cluster-api.sigs.k8s.io/) replacement; rather, it plays very well with it.

Since Kamaji focuses solely on the Control Plane, a [Kamaji Cluster API Control Plane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been developed.

### 🛣️ Roadmap

- [x] Dynamic address on Load Balancer
- [x] Zero Downtime Tenant Control Plane upgrade
- [x] `konnectivity` integration
- [ ] Provisioning of Tenant Control Plane through Cluster APIs
- [x] [Join worker nodes from anywhere thanks to Konnectivity](https://kamaji.clastix.io/concepts/#konnectivity)
- [x] [Alternative datastore MySQL and PostgreSQL](https://kamaji.clastix.io/guides/alternative-datastore/)
- [x] [Pool of multiple datastores](https://kamaji.clastix.io/concepts/#datastores)
- [x] [Seamless migration between datastores](https://kamaji.clastix.io/guides/datastore-migration/)
- [ ] Automatic assignment to a datastore
- [ ] Autoscaling of Tenant Control Plane
- [x] [Provisioning through Cluster APIs](https://github.com/clastix/cluster-api-control-plane-provider-kamaji)
- [ ] Terraform provider
- [ ] Custom Prometheus metrics for monitoring and alerting
- [x] `kine` integration for MySQL as datastore
- [x] `kine` integration for PostgreSQL as datastore
- [x] Pool of multiple datastores
- [x] Seamless migration between datastores with the same driver
- [ ] Automatic assignment of a Tenant Control Plane to a datastore
- [ ] Autoscaling of Tenant Control Plane pods
- [ ] Custom Prometheus metrics

### 🎥 Multimedia

## Documentation
Please check the project's [documentation](https://kamaji.clastix.io/) for getting started with Kamaji.
- Playlist ▶️ [Tutorials and How-Tos by Dario Tranchitella, CLASTIX](https://www.youtube.com/playlist?list=PLjiUjoV4Ws_3pNsUpTXI-KKk731nD2MQY)
- YouTube ▶️ [Metal³ provisioning with Kamaji Hosted Control Planes by Huy Mai, Ericsson](https://youtu.be/u9sbURj6jXY?t=10536)
- YouTube ▶️ [Hands-on introduction to Kamaji](https://www.youtube.com/watch?v=HhevxwQWQ88)
- YouTube ▶️ [Scaling Kubernetes up to 1,000 Control Planes](https://www.youtube.com/watch?v=W_HXRXJh96U)
- YouTube ▶️ [Equinix, Kamaji, and Cluster API](https://www.youtube.com/watch?v=TLBTqROj_wA)
- YouTube ▶️ [Rancher & Kamaji: solving multitenancy challenges in the Kubernetes world](https://www.youtube.com/watch?v=VXHNrMmlF8U)
- YouTube ▶️ [Enabling Self-Service Kubernetes clusters with Kamaji and Paralus](https://www.youtube.com/watch?v=JWA2LwZazM0)

## Contributions
Kamaji is Open Source with Apache 2 license and any contribution is welcome.
### 🏷️ Versioning

## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
A full list of the available releases is available in the GitHub repository's [**Release** section](https://github.com/clastix/kamaji/releases).

### 📄 Documentation

Further documentation can be found on the official [Kamaji documentation website](https://kamaji.clastix.io/).

### 🤝 Contributions

Contributions are highly appreciated and very welcome!

In case of bugs, please check whether the issue has already been reported in the [GitHub Issues](https://github.com/clastix/kamaji/issues) section.
If it hasn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.

You can express your intention to work on the fix on your own.
Commit messages are checked according to the described [semantics](https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md#semantics).
Commits are used to generate the changelog, and their author will be referenced in it.

In case of **✨ Feature Requests**, please use the [Discussion's Feature Request section](https://github.com/clastix/kamaji/discussions/categories/feature-requests).

### 📝 License

The Kamaji Cluster API Control Plane provider is licensed under Apache 2.0.
The code is provided as-is with no warranties.

### 🛟 Commercial Support

[CLASTIX](https://clastix.io/) is the commercial company behind Kamaji and the Cluster API Control Plane provider.

If you're looking to run Kamaji in production and would like to learn more, **CLASTIX** can help by offering [Open Source support plans](https://clastix.io/support),
as well as providing a comprehensive Enterprise Platform named [CLASTIX Enterprise Platform](https://clastix.cloud/), built on top of the Kamaji and [Capsule](https://capsule.clastix.io/) projects (the latter now donated to the CNCF as a Sandbox project).

Feel free to get in touch using the provided [Contact form](https://clastix.io/contact).
Removed file — 57 lines (package v1alpha1): the validating webhook that guarded Secrets referenced by a DataStore.

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "context"
    "fmt"
    "strings"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

//+kubebuilder:webhook:path=/validate--v1-secret,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=secrets,verbs=delete,versions=v1,name=vdatastoresecrets.kb.io,admissionReviewVersions=v1

type dataStoreSecretValidator struct {
    log    logr.Logger
    client client.Client
}

func (d *dataStoreSecretValidator) ValidateCreate(context.Context, runtime.Object) error {
    return nil
}

func (d *dataStoreSecretValidator) ValidateUpdate(context.Context, runtime.Object, runtime.Object) error {
    return nil
}

func (d *dataStoreSecretValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
    secret := obj.(*corev1.Secret) //nolint:forcetypeassert

    dsList := &DataStoreList{}

    if err := d.client.List(ctx, dsList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(DatastoreUsedSecretNamespacedNameKey, fmt.Sprintf("%s/%s", secret.GetNamespace(), secret.GetName()))}); err != nil {
        return err
    }

    if len(dsList.Items) > 0 {
        var res []string

        for _, ds := range dsList.Items {
            res = append(res, ds.GetName())
        }

        return fmt.Errorf("the Secret is used by the following kamajiv1alpha1.DataStores and cannot be deleted (%s)", strings.Join(res, ", "))
    }

    return nil
}

func (d *dataStoreSecretValidator) Default(context.Context, runtime.Object) error {
    return nil
}
Removed file — 185 lines (package v1alpha1): webhook setup, defaulting, and validation logic for the DataStore resource.

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "context"
    "fmt"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-datastore,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update,versions=v1alpha1,name=mdatastore.kb.io,admissionReviewVersions=v1
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-datastore,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update;delete,versions=v1alpha1,name=vdatastore.kb.io,admissionReviewVersions=v1

func (in *DataStore) SetupWebhookWithManager(mgr ctrl.Manager) error {
    secretValidator := &dataStoreSecretValidator{
        log:    mgr.GetLogger().WithName("datastore-secret-webhook"),
        client: mgr.GetClient(),
    }

    if err := ctrl.NewWebhookManagedBy(mgr).For(&corev1.Secret{}).WithValidator(secretValidator).Complete(); err != nil {
        return err
    }

    dsValidator := &dataStoreValidator{
        log:    mgr.GetLogger().WithName("datastore-webhook"),
        client: mgr.GetClient(),
    }

    return ctrl.NewWebhookManagedBy(mgr).
        For(in).
        WithValidator(dsValidator).
        WithDefaulter(dsValidator).
        Complete()
}

type dataStoreValidator struct {
    log    logr.Logger
    client client.Client
}

func (d *dataStoreValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
    ds, ok := obj.(*DataStore)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
    }

    if err := d.validate(ctx, ds); err != nil {
        return err
    }

    return nil
}

func (d *dataStoreValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
    old, ok := oldObj.(*DataStore)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
    }

    ds, ok := newObj.(*DataStore)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
    }

    d.log.Info("validate update", "name", ds.GetName())

    if ds.Spec.Driver != old.Spec.Driver {
        return fmt.Errorf("driver of a DataStore cannot be changed")
    }

    if err := d.validate(ctx, ds); err != nil {
        return err
    }

    return nil
}

func (d *dataStoreValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
    ds, ok := obj.(*DataStore)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
    }

    tcpList := &TenantControlPlaneList{}

    if err := d.client.List(ctx, tcpList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(TenantControlPlaneUsedDataStoreKey, ds.GetName())}); err != nil {
        return err
    }

    if len(tcpList.Items) > 0 {
        return fmt.Errorf("the DataStore is used by multiple TenantControlPlanes and cannot be removed")
    }

    return nil
}

func (d *dataStoreValidator) Default(context.Context, runtime.Object) error {
    return nil
}

func (d *dataStoreValidator) validate(ctx context.Context, ds *DataStore) error {
    if ds.Spec.BasicAuth != nil {
        if err := d.validateBasicAuth(ctx, ds); err != nil {
            return err
        }
    }

    if err := d.validateTLSConfig(ctx, ds); err != nil {
        return err
    }

    return nil
}

func (d *dataStoreValidator) validateBasicAuth(ctx context.Context, ds *DataStore) error {
    if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Password); err != nil {
        return fmt.Errorf("basic-auth password is not valid, %w", err)
    }

    if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Username); err != nil {
        return fmt.Errorf("basic-auth username is not valid, %w", err)
    }

    return nil
}

func (d *dataStoreValidator) validateTLSConfig(ctx context.Context, ds *DataStore) error {
    if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.CertificateAuthority.Certificate); err != nil {
        return fmt.Errorf("CA certificate is not valid, %w", err)
    }

    if ds.Spec.Driver == EtcdDriver {
        if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey == nil {
            return fmt.Errorf("CA private key is required when using the etcd driver")
        }
    }

    if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil {
        if err := d.validateContentReference(ctx, *ds.Spec.TLSConfig.CertificateAuthority.PrivateKey); err != nil {
            return fmt.Errorf("CA private key is not valid, %w", err)
        }
    }

    if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.Certificate); err != nil {
        return fmt.Errorf("client certificate is not valid, %w", err)
    }

    if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.PrivateKey); err != nil {
        return fmt.Errorf("client private key is not valid, %w", err)
    }

    return nil
}

func (d *dataStoreValidator) validateContentReference(ctx context.Context, ref ContentRef) error {
    switch {
    case len(ref.Content) > 0:
        return nil
    case ref.SecretRef == nil:
        return fmt.Errorf("the Secret reference is mandatory when bare content is not specified")
    case len(ref.SecretRef.SecretReference.Name) == 0:
        return fmt.Errorf("the Secret reference name is mandatory")
    case len(ref.SecretRef.SecretReference.Namespace) == 0:
        return fmt.Errorf("the Secret reference namespace is mandatory")
    }

    if err := d.client.Get(ctx, types.NamespacedName{Name: ref.SecretRef.SecretReference.Name, Namespace: ref.SecretRef.SecretReference.Namespace}, &corev1.Secret{}); err != nil {
        if errors.IsNotFound(err) {
            return fmt.Errorf("secret %s/%s is not found", ref.SecretRef.SecretReference.Namespace, ref.SecretRef.SecretReference.Name)
        }

        return err
    }

    return nil
}
api/v1alpha1/tenantcontrolplane_registrysettings.go — new file, 18 lines

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

type RegistrySettings struct {
    // +kubebuilder:default="registry.k8s.io"
    Registry string `json:"registry,omitempty"`
    // The tag to append to all the Control Plane container images.
    // Optional.
    TagSuffix string `json:"tagSuffix,omitempty"`
    // +kubebuilder:default="kube-apiserver"
    APIServerImage string `json:"apiServerImage,omitempty"`
    // +kubebuilder:default="kube-controller-manager"
    ControllerManagerImage string `json:"controllerManagerImage,omitempty"`
    // +kubebuilder:default="kube-scheduler"
    SchedulerImage string `json:"schedulerImage,omitempty"`
}
api/v1alpha1/tenantcontrolplane_registrysettings_funcs.go — new file, 30 lines

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "fmt"
)

func (r *RegistrySettings) buildContainerImage(name, tag string) string {
    image := fmt.Sprintf("%s/%s:%s", r.Registry, name, tag)

    if len(r.TagSuffix) > 0 {
        image += r.TagSuffix
    }

    return image
}

func (r *RegistrySettings) KubeAPIServerImage(version string) string {
    return r.buildContainerImage(r.APIServerImage, version)
}

func (r *RegistrySettings) KubeSchedulerImage(version string) string {
    return r.buildContainerImage(r.SchedulerImage, version)
}

func (r *RegistrySettings) KubeControllerManagerImage(version string) string {
    return r.buildContainerImage(r.ControllerManagerImage, version)
}
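The new helpers only concatenate strings, so their effect is easy to verify. The following illustrative snippet (the registry name and suffix are made up) shows the image reference an override would produce:

```go
// exampleImageOverride sketches how RegistrySettings composes an image reference.
func exampleImageOverride() string {
	rs := RegistrySettings{
		Registry:       "registry.internal.example.com", // hypothetical private mirror
		APIServerImage: "kube-apiserver",
		TagSuffix:      "-fips", // appended verbatim after the version tag
	}

	// Returns "registry.internal.example.com/kube-apiserver:v1.27.0-fips".
	return rs.KubeAPIServerImage("v1.27.0")
}
```

Note that `TagSuffix` is appended to the whole reference after the version, so it must form a valid image tag together with it.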
TenantControlPlane API types (package v1alpha1)

@@ -93,27 +93,22 @@ type IngressSpec struct {
     Hostname string `json:"hostname,omitempty"`
 }

-// ComponentResourceRequirements describes the compute resource requirements.
-type ComponentResourceRequirements struct {
-    // Limits describes the maximum amount of compute resources allowed.
-    // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-    Limits corev1.ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
-    // Requests describes the minimum amount of compute resources required.
-    // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
-    // otherwise to an implementation-defined value.
-    // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-    Requests corev1.ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
-}

 type ControlPlaneComponentsResources struct {
-    APIServer         *ComponentResourceRequirements `json:"apiServer,omitempty"`
-    ControllerManager *ComponentResourceRequirements `json:"controllerManager,omitempty"`
-    Scheduler         *ComponentResourceRequirements `json:"scheduler,omitempty"`
+    APIServer         *corev1.ResourceRequirements `json:"apiServer,omitempty"`
+    ControllerManager *corev1.ResourceRequirements `json:"controllerManager,omitempty"`
+    Scheduler         *corev1.ResourceRequirements `json:"scheduler,omitempty"`
+    // Define the kine container resources.
+    // Available only if Kamaji is running using Kine as backing storage.
+    Kine *corev1.ResourceRequirements `json:"kine,omitempty"`
 }

 type DeploymentSpec struct {
+    // RegistrySettings allows to override the default images for the given Tenant Control Plane instance.
+    // It could be used to point to a different container registry rather than the public one.
+    // +kubebuilder:default={registry:"registry.k8s.io",apiServerImage:"kube-apiserver",controllerManagerImage:"kube-controller-manager",schedulerImage:"kube-scheduler"}
+    RegistrySettings RegistrySettings `json:"registrySettings,omitempty"`
     // +kubebuilder:default=2
-    Replicas int32 `json:"replicas,omitempty"`
+    Replicas *int32 `json:"replicas,omitempty"`
     // NodeSelector is a selector which must be true for the pod to fit on a node.
     // Selector which must match a node's labels for the pod to be scheduled on that node.
     // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
@@ -143,9 +138,27 @@ type DeploymentSpec struct {
     // (kube-apiserver, controller-manager, and scheduler).
     Resources *ControlPlaneComponentsResources `json:"resources,omitempty"`
     // ExtraArgs allows adding additional arguments to the Control Plane components,
-    // such as kube-apiserver, controller-manager, and scheduler.
+    // such as kube-apiserver, controller-manager, and scheduler. WARNING - This option
+    // can override existing parameters and cause components to misbehave in unexpected ways.
+    // Only modify if you know what you are doing.
     ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
     AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
+    // AdditionalInitContainers allows adding additional init containers to the Control Plane deployment.
+    AdditionalInitContainers []corev1.Container `json:"additionalInitContainers,omitempty"`
+    // AdditionalContainers allows adding additional containers to the Control Plane deployment.
+    AdditionalContainers []corev1.Container `json:"additionalContainers,omitempty"`
+    // AdditionalVolumes allows to add additional volumes to the Control Plane deployment.
+    AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
+    // AdditionalVolumeMounts allows to mount an additional volume into each component of the Control Plane
+    // (kube-apiserver, controller-manager, and scheduler).
+    AdditionalVolumeMounts *AdditionalVolumeMounts `json:"additionalVolumeMounts,omitempty"`
 }

+// AdditionalVolumeMounts allows mounting additional volumes to the Control Plane components.
+type AdditionalVolumeMounts struct {
+    APIServer         []corev1.VolumeMount `json:"apiServer,omitempty"`
+    ControllerManager []corev1.VolumeMount `json:"controllerManager,omitempty"`
+    Scheduler         []corev1.VolumeMount `json:"scheduler,omitempty"`
+}

 // ControlPlaneExtraArgs allows specifying additional arguments to the Control Plane components.
@@ -178,6 +191,9 @@ type ImageOverrideTrait struct {
 }

 // ExtraArgs allows adding additional arguments to said component.
+// WARNING - This option can override existing konnectivity
+// parameters and cause konnectivity components to misbehave in
+// unexpected ways. Only modify if you know what you are doing.
 type ExtraArgs []string

 type KonnectivityServerSpec struct {
@@ -190,8 +206,8 @@ type KonnectivityServerSpec struct {
     // +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
     Image string `json:"image,omitempty"`
     // Resources define the amount of CPU and memory to allocate to the Konnectivity server.
-    Resources *ComponentResourceRequirements `json:"resources,omitempty"`
-    ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
+    Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+    ExtraArgs ExtraArgs                    `json:"extraArgs,omitempty"`
 }

 type KonnectivityAgentSpec struct {
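The hunks above drop the bespoke `ComponentResourceRequirements` type in favour of the upstream `corev1.ResourceRequirements`. As a rough, hedged illustration of what a consumer of the new API now fills in (the values are arbitrary and the function is not part of this changeset):

```go
package v1alpha1_example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

// apiServerBudget sketches a resource budget for the kube-apiserver container,
// expressed with plain corev1.ResourceRequirements as for any other container.
func apiServerBudget() kamajiv1alpha1.ControlPlaneComponentsResources {
	return kamajiv1alpha1.ControlPlaneComponentsResources{
		APIServer: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
	}
}
```

Since `DeploymentSpec.Replicas` also becomes a pointer in this changeset, an unset replica count can now be told apart from an explicit `0`.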
Removed file — 188 lines (package v1alpha1): webhook setup, defaulting, and validation logic for the TenantControlPlane resource.

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "context"
    "fmt"
    "strings"

    "github.com/blang/semver"
    "github.com/go-logr/logr"
    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/clastix/kamaji/internal/upgrade"
)

//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=mtenantcontrolplane.kb.io,admissionReviewVersions=v1
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=vtenantcontrolplane.kb.io,admissionReviewVersions=v1

func (in *TenantControlPlane) SetupWebhookWithManager(mgr ctrl.Manager, datastore string) error {
    validator := &tenantControlPlaneValidator{
        client:           mgr.GetClient(),
        defaultDatastore: datastore,
        log:              mgr.GetLogger().WithName("tenantcontrolplane-webhook"),
    }

    return ctrl.NewWebhookManagedBy(mgr).
        For(in).
        WithValidator(validator).
        WithDefaulter(validator).
        Complete()
}

type tenantControlPlaneValidator struct {
    client           client.Client
    defaultDatastore string
    log              logr.Logger
}

func (t *tenantControlPlaneValidator) Default(_ context.Context, obj runtime.Object) error {
    tcp, ok := obj.(*TenantControlPlane)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
    }

    if len(tcp.Spec.DataStore) == 0 {
        tcp.Spec.DataStore = t.defaultDatastore
    }

    return nil
}

func (t *tenantControlPlaneValidator) ValidateCreate(_ context.Context, obj runtime.Object) error {
    tcp, ok := obj.(*TenantControlPlane)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
    }

    t.log.Info("validate create", "name", tcp.Name, "namespace", tcp.Namespace)

    ver, err := semver.New(t.normalizeKubernetesVersion(tcp.Spec.Kubernetes.Version))
    if err != nil {
        return errors.Wrap(err, "unable to parse the desired Kubernetes version")
    }

    supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
    if supportedErr != nil {
        return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
    }

    if ver.GT(supportedVer) {
        return fmt.Errorf("unable to create a TenantControlPlane with a Kubernetes version greater than the supported one, actually %s", supportedVer.String())
    }

    if err = t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
        return err
    }

    return nil
}

func (t *tenantControlPlaneValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
    old, ok := oldObj.(*TenantControlPlane)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
    }

    tcp, ok := newObj.(*TenantControlPlane)
    if !ok {
        return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
    }

    t.log.Info("validate update", "name", tcp.Name, "namespace", tcp.Namespace)

    if err := t.validateVersionUpdate(old, tcp); err != nil {
        return err
    }
    if err := t.validateDataStore(ctx, old, tcp); err != nil {
        return err
    }
    if err := t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
        return err
    }

    return nil
}

func (t *tenantControlPlaneValidator) ValidateDelete(context.Context, runtime.Object) error {
    return nil
}

func (t *tenantControlPlaneValidator) validatePreferredKubeletAddressTypes(addressTypes []KubeletPreferredAddressType) error {
    s := sets.NewString()

    for _, at := range addressTypes {
        if s.Has(string(at)) {
            return fmt.Errorf("preferred kubelet address types is stated multiple times: %s", at)
        }

        s.Insert(string(at))
    }

    return nil
}

func (t *tenantControlPlaneValidator) validateVersionUpdate(oldObj, newObj *TenantControlPlane) error {
    oldVer, oldErr := semver.Make(t.normalizeKubernetesVersion(oldObj.Spec.Kubernetes.Version))
    if oldErr != nil {
        return errors.Wrap(oldErr, "unable to parse the previous Kubernetes version")
    }

    newVer, newErr := semver.New(t.normalizeKubernetesVersion(newObj.Spec.Kubernetes.Version))
    if newErr != nil {
        return errors.Wrap(newErr, "unable to parse the desired Kubernetes version")
    }

    supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
    if supportedErr != nil {
        return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
    }

    switch {
    case newVer.GT(supportedVer):
        return fmt.Errorf("unable to upgrade to a version greater than the supported one, actually %s", supportedVer.String())
    case newVer.LT(oldVer):
        return fmt.Errorf("unable to downgrade a TenantControlPlane from %s to %s", oldVer.String(), newVer.String())
    case newVer.Minor-oldVer.Minor > 1:
        return fmt.Errorf("unable to upgrade to a minor version in a non-sequential mode")
    }

    return nil
}

func (t *tenantControlPlaneValidator) validateDataStore(ctx context.Context, oldObj, tcp *TenantControlPlane) error {
    if oldObj.Spec.DataStore == tcp.Spec.DataStore {
        return nil
    }

    previousDatastore, desiredDatastore := &DataStore{}, &DataStore{}

    if err := t.client.Get(ctx, types.NamespacedName{Name: oldObj.Spec.DataStore}, previousDatastore); err != nil {
        return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
    }

    if err := t.client.Get(ctx, types.NamespacedName{Name: tcp.Spec.DataStore}, desiredDatastore); err != nil {
        return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
    }

    if previousDatastore.Spec.Driver != desiredDatastore.Spec.Driver {
        return fmt.Errorf("migration between different Datastore drivers is not supported")
    }

    return nil
}

func (t *tenantControlPlaneValidator) normalizeKubernetesVersion(input string) string {
    if strings.HasPrefix(input, "v") {
        return strings.Replace(input, "v", "", 1)
    }

    return input
}
TenantControlPlane API constants (package v1alpha1)

@@ -28,9 +28,10 @@ func (c CGroupDriver) String() string {
 }

 const (
-    ServiceTypeLoadBalancer = (ServiceType)(corev1.ServiceTypeLoadBalancer)
-    ServiceTypeClusterIP    = (ServiceType)(corev1.ServiceTypeClusterIP)
-    ServiceTypeNodePort     = (ServiceType)(corev1.ServiceTypeNodePort)
+    ServiceTypeLoadBalancer       = (ServiceType)(corev1.ServiceTypeLoadBalancer)
+    ServiceTypeClusterIP          = (ServiceType)(corev1.ServiceTypeClusterIP)
+    ServiceTypeNodePort           = (ServiceType)(corev1.ServiceTypeNodePort)
+    KubeconfigSecretKeyAnnotation = "kamaji.clastix.io/kubeconfig-secret-key"
 )

 // +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
Removed file — 123 lines (package v1alpha1): the envtest-based Ginkgo suite that exercised the admission webhooks.

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
    "context"
    "crypto/tls"
    "fmt"
    "net"
    "path/filepath"
    "testing"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    admissionv1beta1 "k8s.io/api/admission/v1beta1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/rest"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    //+kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var (
    cfg       *rest.Config
    k8sClient client.Client
    testEnv   *envtest.Environment
    ctx       context.Context
    cancel    context.CancelFunc
)

func TestAPIs(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Webhook Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
        ErrorIfCRDPathMissing: false,
        WebhookInstallOptions: envtest.WebhookInstallOptions{
            Paths: []string{filepath.Join("..", "..", "config", "webhook")},
        },
    }

    var err error
    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    scheme := runtime.NewScheme()
    err = AddToScheme(scheme)
    Expect(err).NotTo(HaveOccurred())

    err = admissionv1beta1.AddToScheme(scheme)
    Expect(err).NotTo(HaveOccurred())

    //+kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())

    // start webhook server using Manager
    webhookInstallOptions := &testEnv.WebhookInstallOptions
    mgr, err := ctrl.NewManager(cfg, ctrl.Options{
        Scheme:             scheme,
        Host:               webhookInstallOptions.LocalServingHost,
        Port:               webhookInstallOptions.LocalServingPort,
        CertDir:            webhookInstallOptions.LocalServingCertDir,
        LeaderElection:     false,
        MetricsBindAddress: "0",
    })
    Expect(err).NotTo(HaveOccurred())

    err = (&TenantControlPlane{}).SetupWebhookWithManager(mgr, "")
    Expect(err).NotTo(HaveOccurred())

    err = (&DataStore{}).SetupWebhookWithManager(mgr)
    Expect(err).NotTo(HaveOccurred())

    //+kubebuilder:scaffold:webhook

    go func() {
        defer GinkgoRecover()
        err = mgr.Start(ctx)
        Expect(err).NotTo(HaveOccurred())
    }()

    // wait for the webhook server to get ready
    dialer := &net.Dialer{Timeout: time.Second}
    addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
    Eventually(func() error {
        conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            return err
        }
        conn.Close()

        return nil
    }).Should(Succeed())
})

var _ = AfterSuite(func() {
    cancel()
    By("tearing down the test environment")
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})
@@ -10,7 +10,7 @@ package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
@@ -58,6 +58,42 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdditionalVolumeMounts) DeepCopyInto(out *AdditionalVolumeMounts) {
|
||||
*out = *in
|
||||
if in.APIServer != nil {
|
||||
in, out := &in.APIServer, &out.APIServer
|
||||
*out = make([]v1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ControllerManager != nil {
|
||||
in, out := &in.ControllerManager, &out.ControllerManager
|
||||
*out = make([]v1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Scheduler != nil {
|
||||
in, out := &in.Scheduler, &out.Scheduler
|
||||
*out = make([]v1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolumeMounts.
|
||||
func (in *AdditionalVolumeMounts) DeepCopy() *AdditionalVolumeMounts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AdditionalVolumeMounts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
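These generated helpers return fully independent copies, which is what lets controllers mutate a retrieved object without aliasing the shared cache. An illustrative snippet, using only the APIServer field and v1.VolumeMount type shown above:

```go
// Mutating the copy does not touch the original's slices.
original := &AdditionalVolumeMounts{
	APIServer: []v1.VolumeMount{{Name: "extra-ca", MountPath: "/etc/ssl/extra"}},
}
clone := original.DeepCopy()
clone.APIServer[0].MountPath = "/tmp/changed"
// original.APIServer[0].MountPath is still "/etc/ssl/extra".
```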
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) {
|
||||
*out = *in
|
||||
@@ -254,35 +290,6 @@ func (in *ClientCertificate) DeepCopy() *ClientCertificate {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ComponentResourceRequirements) DeepCopyInto(out *ComponentResourceRequirements) {
|
||||
*out = *in
|
||||
if in.Limits != nil {
|
||||
in, out := &in.Limits, &out.Limits
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.Requests != nil {
|
||||
in, out := &in.Requests, &out.Requests
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResourceRequirements.
|
||||
func (in *ComponentResourceRequirements) DeepCopy() *ComponentResourceRequirements {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ComponentResourceRequirements)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ContentRef) DeepCopyInto(out *ContentRef) {
|
||||
*out = *in
|
||||
@@ -335,17 +342,22 @@ func (in *ControlPlaneComponentsResources) DeepCopyInto(out *ControlPlaneCompone
|
||||
*out = *in
|
||||
if in.APIServer != nil {
|
||||
in, out := &in.APIServer, &out.APIServer
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(v1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ControllerManager != nil {
|
||||
in, out := &in.ControllerManager, &out.ControllerManager
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(v1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Scheduler != nil {
|
||||
in, out := &in.Scheduler, &out.Scheduler
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(v1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Kine != nil {
|
||||
in, out := &in.Kine, &out.Kine
|
||||
*out = new(v1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
@@ -547,9 +559,25 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DatastoreUsedSecret) DeepCopyInto(out *DatastoreUsedSecret) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatastoreUsedSecret.
|
||||
func (in *DatastoreUsedSecret) DeepCopy() *DatastoreUsedSecret {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DatastoreUsedSecret)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
*out = *in
|
||||
out.RegistrySettings = in.RegistrySettings
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
@@ -557,6 +585,7 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
in.Strategy.DeepCopyInto(&out.Strategy)
|
||||
if in.Tolerations != nil {
|
||||
in, out := &in.Tolerations, &out.Tolerations
|
||||
*out = make([]v1.Toleration, len(*in))
|
||||
@@ -587,6 +616,32 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
|
||||
if in.AdditionalInitContainers != nil {
|
||||
in, out := &in.AdditionalInitContainers, &out.AdditionalInitContainers
|
||||
*out = make([]v1.Container, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalContainers != nil {
|
||||
in, out := &in.AdditionalContainers, &out.AdditionalContainers
|
||||
*out = make([]v1.Container, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalVolumes != nil {
|
||||
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
|
||||
*out = make([]v1.Volume, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalVolumeMounts != nil {
|
||||
in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts
|
||||
*out = new(AdditionalVolumeMounts)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
|
||||
@@ -757,7 +812,7 @@ func (in *KonnectivityServerSpec) DeepCopyInto(out *KonnectivityServerSpec) {
|
||||
*out = *in
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(v1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ExtraArgs != nil {
|
||||
@@ -901,6 +956,11 @@ func (in *KubeconfigsStatus) DeepCopy() *KubeconfigsStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeletSpec) DeepCopyInto(out *KubeletSpec) {
|
||||
*out = *in
|
||||
if in.PreferredAddressTypes != nil {
|
||||
in, out := &in.PreferredAddressTypes, &out.PreferredAddressTypes
|
||||
*out = make([]KubeletPreferredAddressType, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletSpec.
|
||||
@@ -965,7 +1025,7 @@ func (in *KubernetesServiceStatus) DeepCopy() *KubernetesServiceStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubernetesSpec) DeepCopyInto(out *KubernetesSpec) {
|
||||
*out = *in
|
||||
out.Kubelet = in.Kubelet
|
||||
in.Kubelet.DeepCopyInto(&out.Kubelet)
|
||||
if in.AdmissionControllers != nil {
|
||||
in, out := &in.AdmissionControllers, &out.AdmissionControllers
|
||||
*out = make(AdmissionControllers, len(*in))
|
||||
@@ -1067,6 +1127,21 @@ func (in *PublicKeyPrivateKeyPairStatus) DeepCopy() *PublicKeyPrivateKeyPairStat
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RegistrySettings) DeepCopyInto(out *RegistrySettings) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySettings.
|
||||
func (in *RegistrySettings) DeepCopy() *RegistrySettings {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RegistrySettings)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
@@ -1233,3 +1308,18 @@ func (in *TenantControlPlaneStatus) DeepCopy() *TenantControlPlaneStatus {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TenantControlPlaneStatusDataStore) DeepCopyInto(out *TenantControlPlaneStatusDataStore) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantControlPlaneStatusDataStore.
|
||||
func (in *TenantControlPlaneStatusDataStore) DeepCopy() *TenantControlPlaneStatusDataStore {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TenantControlPlaneStatusDataStore)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
|
@@ -1 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" role="img" viewBox="11.85 8.10 202.80 187.55"><title>Kamaji</title><path d="M32.1 13.7c-2.4.9-6.3 3.5-8.6 5.8-7.7 7.7-7.5 5-7.5 82.5 0 77.4-.2 74.8 7.5 82.5 7.7 7.8 4.2 7.5 90 7.5s82.3.3 90-7.5c7.7-7.7 7.5-5.1 7.5-82.5s.2-74.8-7.5-82.5c-7.8-7.8-4.1-7.5-90.4-7.4-66.7 0-77.2.3-81 1.6zm160.5 9.9c1.9.9 4.4 3.1 5.7 4.8l2.2 3.1v141l-2.2 3.1c-4.8 6.7-1.1 6.4-84.8 6.4s-80 .3-84.8-6.4l-2.2-3.1v-141l2.2-3.1c4.8-6.6.8-6.4 84.6-6.4 68 0 76.3.2 79.3 1.6z"/><path d="M90.1 33.7c-5.1 2.5-7.3 6.7-6.8 13.1.3 4.1 1 5.9 3.3 8.4s2.5 3 .9 2.3c-2-.7-25.1-4.6-29-4.9-1.1 0-2 .5-2 1.4 0 1.1-1.2 1.5-4.9 1.5-6.7 0-6.8 1.9-.4 4 8.2 2.7 9 3.4 3.3 3.5-5.3 0-8.2 1.1-7.1 2.8.7 1.2-2.7 2.2-8.1 2.2-7 0-6.5 2.4 1.1 5.1l3.9 1.4-2.9.5c-4.3.8-3.2 2.3 2.8 4.1l5.3 1.5-5.2 2.7c-8.2 4.2-8.3 5.8-.4 6.1 5.6.2 7.3 1.1 4.2 2.1-2.3.7-2.8 3.1-.9 3.7.7.3-.5 2-2.8 4-5.6 5.3-4 6.4 6.2 4.5 4.4-.8 8.1-1.3 8.3-1.2.2.2-1.3 2.4-3.3 4.8-2 2.4-3.6 4.7-3.6 5.2 0 .4 1.4.5 3 .3 2.9-.4 4 .5 2 1.7-.5.3-1 1.3-1 2.2 0 1.6 2.2 1.5 6.5-.3 1.7-.7 1.6-.2-.9 3-5.4 7.2.7 6.5 13.6-1.4 2.7-1.7 5.1-3 5.4-3 .3 0-.9 2.1-2.7 4.6-4.5 6.6-2.5 7.9 3.7 2.3 4.6-4.3 4.7-4.3 3-1.2-1.9 3.8-2.1 5.6-.4 5.1.6-.2 7.1-7.1 14.3-15.4 7.2-8.2 13.7-14.9 14.5-14.9.8 0 7.3 6.7 14.6 15 7.2 8.2 13.7 15.1 14.3 15.3 1.6.5 1.4-1.4-.5-5-1.6-3.2-1.6-3.2 3.2 1 6 5.1 7.8 4 3.5-2.2-1.8-2.5-3-4.6-2.7-4.6.3 0 2.7 1.3 5.4 3 12.9 7.9 19 8.6 13.6 1.4-2.5-3.2-2.6-3.7-.9-3 5.9 2.5 7.7 1.7 5.6-2.3-.9-1.5-.6-1.7 2-1.3 3.8.6 3.7-.5-.7-5.7-2-2.3-3.5-4.4-3.2-4.6.2-.2 2.1 0 4.3.4 13.9 3 16.4 1.8 9.8-4.3-2.1-1.9-3.2-3.6-2.5-3.6 2 0 1.4-2.8-.9-3.5-3.2-1-1.3-2 4.2-2.1 7.9-.2 7.8-1.9-.4-6.1l-5.2-2.7 5.4-1.6c6.4-1.8 7.9-4 2.9-4.1h-3.3l3.9-1.5c7.3-2.6 8.4-5.4 2.2-5.4-5.1 0-9.6-1.1-9-2.2 1.1-1.7-1.8-2.8-7.1-2.8-5.7-.1-4.9-.8 3.3-3.5 6.4-2.1 6.3-4-.4-4-3.7 0-4.9-.4-4.9-1.5 0-.9-.9-1.4-2-1.4-3.9.3-27 4.2-29 4.9-1.6.7-1.4.2.9-2.3 3.7-4 4.7-11.3 2.2-16.1-4.8-9.2-18.8-9.3-23.8 0-4.4 8.3.2 18.4 9.5 20.5 3 .6 2.8.8-5.5 4l-8.8 3.3-8.7-3.3c-8.1-3.2-8.4-3.4-5.5-4.1 1.7-.3 4.3-1.5 5.7-2.7 13.1-10.3.6-30.4-14.4-23.1zm77.6 98.4c-3.6 2.1-.8 7.7 3.2 6.4 2.1-.6 3.5-3.1 2.5-4.6-1.1-1.8-4-2.7-5.7-1.8zm8.3 3.9c0 1.9.5 2.1 6.3 1.8 4.7-.2 6.2-.7 6.2-1.8s-1.5-1.6-6.2-1.8c-5.8-.3-6.3-.1-6.3 1.8zm-135.6.3c-.2.7-.3 7.4-.2 14.8l.3 13.4 3.3.3c3.1.3 3.2.2 3.2-3.4 0-2.5.7-4.6 2.1-6l2.1-2.3 5 6c3.9 4.7 5.6 5.9 7.8 5.9 1.6 0 3.1-.3 3.3-.8.3-.4-2.1-4-5.4-8.1-3.2-4-5.9-7.6-5.9-8 0-.4 2.5-3.1 5.5-6.1 3-3 5.5-5.8 5.5-6.2 0-.4-1.5-.8-3.3-.8-2.8 0-4.4 1-9.6 6.5-3.5 3.6-6.5 6.5-6.7 6.5-.2 0-.4-2.9-.4-6.5V135h-3c-1.7 0-3.3.6-3.6 1.3zm31.2 7c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 3.3 2.2zm13.4-4l.3 11.3h6l.5-7.8c.5-7.6 1.5-9.6 4.7-9.7 3 0 4.3 3.2 4.3 10.6v7.4h3c3 0 3 0 3-5.9 0-7.3 1.2-10.7 4.1-11.6 3.8-1.3 5.9 2.5 5.9 10.6v6.9h6v-9c0-8.3-.2-9.3-2.5-11.5-2.9-3-9.8-3.5-12.7-.8-1.7 1.5-1.9 1.5-3.6 0-2.2-2-9.2-2.3-11.1-.5-1.1 1-1.4 1-1.8 0-.3-.6-1.8-1.2-3.4-1.2h-3l.3 11.2zm45.4-9.9c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 
3.3 2.2zm13-2.5c-.3 12.8-.3 12.8-2.7 12.8-1.5 0-2.7.8-3.1 2-2 5.4 9.4 4.3 11.9-1.2.6-1.3 1.1-7.7 1.1-14.3v-12h-6.9l-.3 12.7zm13.4-1.5l.3 11.3h6v-22l-3.3-.3-3.3-.3.3 11.3z"/></svg>
|
||||
|
BIN
assets/logo-black.png
Normal file
|
BIN
assets/logo-colored.png
Normal file
|
BIN
assets/logo-white.png
Normal file
|
1
assets/logo.svg
Normal file
|
|
@@ -1,24 +1,24 @@
|
||||
apiVersion: v2
|
||||
appVersion: v0.2.2
|
||||
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
|
||||
with a fraction of the operational burden. With Kamaji, you can deploy and operate
|
||||
hundreds of Kubernetes clusters as a hyper-scaler.
|
||||
appVersion: v0.5.1
|
||||
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
home: https://github.com/clastix/kamaji
|
||||
icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
|
||||
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
|
||||
kubeVersion: ">=1.21.0-0"
|
||||
maintainers:
|
||||
- email: dario@tranchitella.eu
|
||||
name: Dario Tranchitella
|
||||
url: https://clastix.io
|
||||
- email: me@maxgio.it
|
||||
name: Massimiliano Giovagnoli
|
||||
- email: me@bsctl.io
|
||||
name: Adriano Pezzuto
|
||||
url: https://clastix.io
|
||||
name: kamaji
|
||||
sources:
|
||||
- https://github.com/clastix/kamaji
|
||||
type: application
|
||||
version: 0.11.4
|
||||
version: 0.15.3
|
||||
annotations:
|
||||
catalog.cattle.io/certified: partner
|
||||
catalog.cattle.io/release-name: kamaji
|
||||
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service
|
||||
catalog.cattle.io/display-name: Kamaji
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
# kamaji
|
||||
|
||||
  
|
||||
  
|
||||
|
||||
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
|
||||
Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Name | Email | Url |
|
||||
| ---- | ------ | --- |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | <https://clastix.io> |
|
||||
| Massimiliano Giovagnoli | <me@maxgio.it> | |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | <https://clastix.io> |
|
||||
|
||||
## Source Code
|
||||
|
||||
@@ -66,6 +66,8 @@ Here the values you can override:
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
|
||||
| cfssl.image.repository | string | `"cfssl/cfssl"` | |
|
||||
| cfssl.image.tag | string | `"latest"` | |
|
||||
| datastore.basicAuth.passwordSecret.keyPath | string | `nil` | The Secret key where the data is stored. |
|
||||
| datastore.basicAuth.passwordSecret.name | string | `nil` | The name of the Secret containing the password used to connect to the relational database. |
|
||||
| datastore.basicAuth.passwordSecret.namespace | string | `nil` | The namespace of the Secret containing the password used to connect to the relational database. |
|
||||
@@ -73,8 +75,9 @@ Here the values you can override:
|
||||
| datastore.basicAuth.usernameSecret.name | string | `nil` | The name of the Secret containing the username used to connect to the relational database. |
|
||||
| datastore.basicAuth.usernameSecret.namespace | string | `nil` | The namespace of the Secret containing the username used to connect to the relational database. |
|
||||
| datastore.driver | string | `"etcd"` | (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (default=etcd). |
|
||||
| datastore.enabled | bool | `true` | (bool) Enable the Kamaji Datastore creation (default=true) |
|
||||
| datastore.endpoints | list | `[]` | (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically. |
|
||||
| datastore.nameOverride | string | `nil` | The Datastore name override, if empty defaults to `default` |
|
||||
| datastore.nameOverride | string | `nil` | The Datastore name override: if empty and enabled=true it defaults to `default`; if enabled=false, this is the name of the Datastore to connect to. |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.name | string | `nil` | Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.namespace | string | `nil` | Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
@@ -100,10 +103,11 @@ Here the values you can override:
|
||||
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
|
||||
| etcd.persistence.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
|
||||
| etcd.persistence.size | string | `"10Gi"` | |
|
||||
| etcd.persistence.storageClass | string | `""` | |
|
||||
| etcd.persistence.storageClassName | string | `""` | |
|
||||
| etcd.port | int | `2379` | The client request port. |
|
||||
| etcd.serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
|
||||
| etcd.serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
|
||||
| etcd.tolerations | list | `[]` | (array) Kubernetes tolerations to apply to Kamaji etcd pods |
|
||||
| extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |
|
||||
| fullnameOverride | string | `""` | |
|
||||
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
|
||||
|
||||
@@ -1,30 +1,12 @@
|
||||
# Kamaji - Managed Kubernetes Service
|
||||
# Kamaji
|
||||
|
||||
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
|
||||
Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden.
|
||||
|
||||
Useful links:
|
||||
- [Kamaji Github repository](https://github.com/clastix/kamaji)
|
||||
- [Kamaji Documentation](https://github.com/clastix/kamaji/docs/)
|
||||
- [Kamaji Documentation](https://kamaji.clastix.io)
|
||||
|
||||
## Requirements
|
||||
|
||||
* Kubernetes v1.22+
|
||||
* Helm v3
|
||||
|
||||
# Installation
|
||||
|
||||
To install the Chart with the release name `kamaji`:
|
||||
|
||||
helm upgrade --install kamaji --namespace kamaji-system --create-namespace clastix/kamaji
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji -n kamaji-system
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji -n kamaji-system clastix/kamaji
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji -n kamaji-system
|
||||
* Helm v3
|
||||
@@ -4,7 +4,7 @@ kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
|
||||
controller-gen.kubebuilder.io/version: v0.9.2
|
||||
controller-gen.kubebuilder.io/version: v0.11.4
|
||||
name: datastores.kamaji.clastix.io
|
||||
spec:
|
||||
group: kamaji.clastix.io
|
||||
@@ -30,10 +30,19 @@ spec:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -41,18 +50,24 @@ spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: In case of authentication enabled for the given data store, specifies the username and password pair. This value is optional.
|
||||
description: |-
|
||||
In case of authentication enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -69,13 +84,17 @@ spec:
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -101,7 +120,9 @@ spec:
|
||||
- PostgreSQL
|
||||
type: string
|
||||
endpoints:
|
||||
description: List of the endpoints to connect to the shared datastore. No need for protocol, just bare IP/FQDN and port.
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
minItems: 1
|
||||
@@ -110,18 +131,24 @@ spec:
|
||||
description: Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference. The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -138,13 +165,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -167,13 +198,17 @@ spec:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -190,13 +225,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
|
||||
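For orientation, here is a hypothetical Go sketch of the object this schema describes. Only the kamajiv1alpha1.DataStore and DataStoreSpec type names are confirmed by the text above; the field names (Driver, Endpoints) and their Go types are assumptions inferred from the JSON schema, so treat this as illustrative rather than the project's actual API surface:

```go
// Hypothetical sketch: field names inferred from the CRD schema, not verified.
ds := &kamajiv1alpha1.DataStore{
	ObjectMeta: metav1.ObjectMeta{Name: "postgresql-bronze"},
	Spec: kamajiv1alpha1.DataStoreSpec{
		Driver:    "PostgreSQL",
		Endpoints: []string{"postgres-rw.databases.svc.cluster.local:5432"},
		// basicAuth and tlsConfig would reference Secrets via secretReference
		// (name, namespace, keyPath), exactly as the schema above describes.
	},
}
// In real code the object would then be created with a controller-runtime client,
// e.g. cli.Create(ctx, ds).
```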
@@ -2,7 +2,11 @@
Create a default fully qualified datastore name.
*/}}
{{- define "datastore.fullname" -}}
{{- if .Values.datastore.enabled }}
{{- default "default" .Values.datastore.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- required "A valid .Values.datastore.nameOverride required!" .Values.datastore.nameOverride }}
{{- end }}
{{- end }}

{{/*
@@ -1,3 +1,4 @@
|
||||
{{- if .Values.datastore.enabled}}
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
@@ -24,3 +25,4 @@ spec:
|
||||
{{- include "datastore.certificateAuthority" . | indent 6 }}
|
||||
clientCertificate:
|
||||
{{- include "datastore.clientCertificate" . | indent 6 }}
|
||||
{{- end}}
|
||||
|
||||
@@ -30,11 +30,15 @@ spec:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
etcdctl member list -w table &&
|
||||
etcdctl user add --no-password=true root &&
|
||||
etcdctl role add root &&
|
||||
etcdctl user grant-role root root &&
|
||||
etcdctl auth enable
|
||||
etcdctl member list -w table
|
||||
if etcdctl user get root &>/dev/null; then
|
||||
echo "User already exists, nothing to do"
|
||||
else
|
||||
etcdctl user add --no-password=true root &&
|
||||
etcdctl role add root &&
|
||||
etcdctl user grant-role root root &&
|
||||
etcdctl auth enable
|
||||
fi
|
||||
env:
|
||||
- name: ETCDCTL_ENDPOINTS
|
||||
value: https://etcd-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:2379
|
||||
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
restartPolicy: Never
|
||||
initContainers:
|
||||
- name: cfssl
|
||||
image: cfssl/cfssl:latest
|
||||
image: "{{ .Values.cfssl.image.repository }}:{{ .Values.cfssl.image.tag }}"
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
@@ -37,13 +37,21 @@ spec:
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |-
|
||||
kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
if kubectl get secret {{ include "etcd.caSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
|
||||
echo "Secret {{ include "etcd.caSecretName" . }} already exists"
|
||||
else
|
||||
echo "Creating secret {{ include "etcd.caSecretName" . }}"
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem
|
||||
fi
|
||||
if kubectl get secret {{ include "etcd.clientSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
|
||||
echo "Secret {{ include "etcd.clientSecretName" . }} already exists"
|
||||
else
|
||||
echo "Creating secret {{ include "etcd.clientSecretName" . }}"
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
|
||||
fi
|
||||
volumeMounts:
|
||||
- mountPath: /certs
|
||||
name: certs
|
||||
|
||||
@@ -15,6 +15,7 @@ rules:
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- delete
|
||||
resourceNames:
|
||||
- {{ include "etcd.caSecretName" . }}
|
||||
|
||||
@@ -22,6 +22,10 @@ spec:
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.caSecretName" . }}
|
||||
{{- with .Values.etcd.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: etcd
|
||||
image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag | default "v3.5.4" }}
|
||||
|
||||
@@ -8,26 +8,6 @@ metadata:
|
||||
{{- include "kamaji.labels" $data | nindent 4 }}
|
||||
name: kamaji-mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /mutate-kamaji-clastix-io-v1alpha1-datastore
|
||||
failurePolicy: Fail
|
||||
name: mdatastore.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
|
||||
@@ -54,12 +54,15 @@ etcd:
|
||||
name: ""
|
||||
persistence:
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
storageClassName: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
# -- The custom annotations to add to the PVC
|
||||
customAnnotations: {}
|
||||
# volumeType: local
|
||||
|
||||
# -- (array) Kubernetes tolerations to apply to Kamaji etcd pods
|
||||
tolerations: []
|
||||
|
||||
overrides:
|
||||
caSecret:
|
||||
@@ -157,7 +160,9 @@ loggingDevel:
|
||||
enable: false
|
||||
|
||||
datastore:
|
||||
# -- (string) The Datastore name override, if empty defaults to `default`
|
||||
# -- (bool) Enable the Kamaji Datastore creation (default=true)
|
||||
enabled: true
|
||||
# -- (string) The Datastore name override: if empty and enabled=true it defaults to `default`; if enabled=false, this is the name of the Datastore to connect to.
|
||||
nameOverride:
|
||||
# -- (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (default=etcd).
|
||||
driver: etcd
|
||||
@@ -209,3 +214,8 @@ datastore:
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the private key.
|
||||
keyPath:
|
||||
|
||||
cfssl:
|
||||
image:
|
||||
repository: cfssl/cfssl
|
||||
tag: latest
|
||||
@@ -9,39 +9,50 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
goRuntime "runtime"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
cmdutils "github.com/clastix/kamaji/cmd/utils"
|
||||
"github.com/clastix/kamaji/controllers"
|
||||
"github.com/clastix/kamaji/controllers/soot"
|
||||
"github.com/clastix/kamaji/internal"
|
||||
"github.com/clastix/kamaji/internal/builders/controlplane"
|
||||
datastoreutils "github.com/clastix/kamaji/internal/datastore/utils"
|
||||
"github.com/clastix/kamaji/internal/webhook"
|
||||
"github.com/clastix/kamaji/internal/webhook/handlers"
|
||||
"github.com/clastix/kamaji/internal/webhook/routes"
|
||||
)
|
||||
|
||||
//nolint:maintidx
|
||||
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
// CLI flags
|
||||
var (
|
||||
metricsBindAddress string
|
||||
healthProbeBindAddress string
|
||||
leaderElect bool
|
||||
tmpDirectory string
|
||||
kineImage string
|
||||
datastore string
|
||||
managerNamespace string
|
||||
managerServiceAccountName string
|
||||
managerServiceName string
|
||||
webhookCABundle []byte
|
||||
migrateJobImage string
|
||||
maxConcurrentReconciles int
|
||||
metricsBindAddress string
|
||||
healthProbeBindAddress string
|
||||
leaderElect bool
|
||||
tmpDirectory string
|
||||
kineImage string
|
||||
controllerReconcileTimeout time.Duration
|
||||
cacheResyncPeriod time.Duration
|
||||
datastore string
|
||||
managerNamespace string
|
||||
managerServiceAccountName string
|
||||
managerServiceName string
|
||||
webhookCABundle []byte
|
||||
migrateJobImage string
|
||||
maxConcurrentReconciles int
|
||||
|
||||
webhookCAPath string
|
||||
)
|
||||
@@ -70,6 +81,10 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
if controllerReconcileTimeout.Seconds() == 0 {
|
||||
return fmt.Errorf("the controller reconcile timeout must be greater than zero")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -82,13 +97,22 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
MetricsBindAddress: metricsBindAddress,
|
||||
Port: 9443,
|
||||
Scheme: scheme,
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: metricsBindAddress,
|
||||
},
|
||||
WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
|
||||
Port: 9443,
|
||||
}),
|
||||
HealthProbeBindAddress: healthProbeBindAddress,
|
||||
LeaderElection: leaderElect,
|
||||
LeaderElectionNamespace: managerNamespace,
|
||||
LeaderElectionID: "799b98bc.clastix.io",
|
||||
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
|
||||
opts.SyncPeriod = &cacheResyncPeriod
|
||||
|
||||
return cache.New(config, opts)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to start manager")
|
||||
@@ -96,9 +120,9 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
tcpChannel := make(controllers.TenantControlPlaneChannel)
|
||||
tcpChannel, certChannel := make(controllers.TenantControlPlaneChannel), make(controllers.CertificateChannel)
|
||||
|
||||
if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
if err = (&controllers.DataStore{Client: mgr.GetClient(), TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "DataStore")
|
||||
|
||||
return err
|
||||
@@ -108,10 +132,12 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
Client: mgr.GetClient(),
|
||||
APIReader: mgr.GetAPIReader(),
|
||||
Config: controllers.TenantControlPlaneReconcilerConfig{
|
||||
ReconcileTimeout: controllerReconcileTimeout,
|
||||
DefaultDataStoreName: datastore,
|
||||
KineContainerImage: kineImage,
|
||||
TmpBaseDirectory: tmpDirectory,
|
||||
},
|
||||
CertificateChan: certChannel,
|
||||
TriggerChan: tcpChannel,
|
||||
KamajiNamespace: managerNamespace,
|
||||
KamajiServiceAccount: managerServiceAccountName,
|
||||
@@ -126,8 +152,8 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (&webhook.Freeze{}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to register webhook", "webhook", "Freeze")
|
||||
if err = (&controllers.CertificateLifecycle{Channel: certChannel}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "CertificateLifecycle")
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -144,13 +170,38 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (&kamajiv1alpha1.TenantControlPlane{}).SetupWebhookWithManager(mgr, datastore); err != nil {
|
||||
setupLog.Error(err, "unable to create webhook", "webhook", "TenantControlPlane")
|
||||
|
||||
return err
|
||||
}
|
||||
if err = (&kamajiv1alpha1.DataStore{}).SetupWebhookWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create webhook", "webhook", "DataStore")
|
||||
err = webhook.Register(mgr, map[routes.Route][]handlers.Handler{
|
||||
routes.TenantControlPlaneMigrate{}: {
|
||||
handlers.Freeze{},
|
||||
},
|
||||
routes.TenantControlPlaneDefaults{}: {
|
||||
handlers.TenantControlPlaneDefaults{DefaultDatastore: datastore},
|
||||
},
|
||||
routes.TenantControlPlaneValidate{}: {
|
||||
handlers.TenantControlPlaneName{},
|
||||
handlers.TenantControlPlaneVersion{},
|
||||
handlers.TenantControlPlaneKubeletAddresses{},
|
||||
handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
|
||||
handlers.TenantControlPlaneDeployment{
|
||||
Client: mgr.GetClient(),
|
||||
DeploymentBuilder: controlplane.Deployment{
|
||||
Client: mgr.GetClient(),
|
||||
KineContainerImage: kineImage,
|
||||
},
|
||||
KonnectivityBuilder: controlplane.Konnectivity{
|
||||
Scheme: *mgr.GetScheme(),
|
||||
},
|
||||
},
|
||||
},
|
||||
routes.DataStoreValidate{}: {
|
||||
handlers.DataStoreValidation{Client: mgr.GetClient()},
|
||||
},
|
||||
routes.DataStoreSecrets{}: {
|
||||
handlers.DataStoreSecretValidation{Client: mgr.GetClient()},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to create webhook")
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -187,6 +238,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Setting zap logger
|
||||
zapfs := flag.NewFlagSet("zap", flag.ExitOnError)
|
||||
opts := zap.Options{
|
||||
@@ -202,12 +254,14 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
cmd.Flags().StringVar(&tmpDirectory, "tmp-directory", "/tmp/kamaji", "Directory which will be used to work with temporary files.")
|
||||
cmd.Flags().StringVar(&kineImage, "kine-image", "rancher/kine:v0.9.2-amd64", "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies).")
|
||||
cmd.Flags().StringVar(&datastore, "datastore", "etcd", "The default DataStore that should be used by Kamaji to setup the required storage.")
|
||||
cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:v%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
|
||||
cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
|
||||
cmd.Flags().IntVar(&maxConcurrentReconciles, "max-concurrent-tcp-reconciles", 1, "Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption)")
|
||||
cmd.Flags().StringVar(&managerNamespace, "pod-namespace", os.Getenv("POD_NAMESPACE"), "The Kubernetes Namespace in which the Operator is running, required for the TenantControlPlane migration jobs.")
|
||||
cmd.Flags().StringVar(&managerServiceName, "webhook-service-name", "kamaji-webhook-service", "The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs.")
|
||||
cmd.Flags().StringVar(&managerServiceAccountName, "serviceaccount-name", os.Getenv("SERVICE_ACCOUNT"), "The name of the ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs.")
|
||||
cmd.Flags().StringVar(&webhookCAPath, "webhook-ca-path", "/tmp/k8s-webhook-server/serving-certs/ca.crt", "Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs.")
|
||||
cmd.Flags().DurationVar(&controllerReconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "The reconciliation request timeout before the controller withdraw the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.")
|
||||
cmd.Flags().DurationVar(&cacheResyncPeriod, "cache-resync-period", 10*time.Hour, "The controller-runtime.Manager cache resync period.")
|
||||
|
||||
cobra.OnInitialize(func() {
|
||||
viper.AutomaticEnv()
|
||||
|
||||
@@ -4,14 +4,12 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
_ "go.uber.org/automaxprocs" // Automatically set `GOMAXPROCS` to match Linux container CPU quota.
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
appsv1 "k8s.io/kubernetes/pkg/apis/apps/v1"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -21,11 +19,9 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
Use: "kamaji",
|
||||
Short: "Build and operate Kubernetes at scale with a fraction of operational burden.",
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
// Seed is required to ensure non-reproducibility of the certificates generated by Kamaji.
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(appsv1.RegisterDefaults(scheme))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.9.2
|
||||
creationTimestamp: null
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: datastores.kamaji.clastix.io
|
||||
spec:
|
||||
group: kamaji.clastix.io
|
||||
@@ -30,14 +29,19 @@ spec:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -45,21 +49,24 @@ spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: In case of authentication enabled for the given data
|
||||
store, specifies the username and password pair. This value is optional.
|
||||
description: |-
|
||||
In case of authentication enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It
|
||||
has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference
|
||||
where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -78,15 +85,17 @@ spec:
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It
|
||||
has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference
|
||||
where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -114,7 +123,8 @@ spec:
|
||||
- PostgreSQL
|
||||
type: string
|
||||
endpoints:
|
||||
description: List of the endpoints to connect to the shared datastore.
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
@@ -125,24 +135,24 @@ spec:
|
||||
to the data store in a secure way.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: Retrieve the Certificate Authority certificate and
|
||||
private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based
|
||||
on certificates, and Kamaji is responsible in creating this.
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -161,16 +171,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -196,16 +207,17 @@ spec:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -224,16 +236,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
|
||||
@@ -9,8 +9,8 @@ namespace: kamaji-system
|
||||
namePrefix: kamaji-
|
||||
|
||||
# Labels to add to all resources and selectors.
|
||||
#commonLabels:
|
||||
# someName: someValue
|
||||
commonLabels:
|
||||
cluster.x-k8s.io/provider: "kamaji-core"
|
||||
|
||||
bases:
|
||||
- ../crd
|
||||
|
||||
6001
config/install.yaml
@@ -13,4 +13,4 @@ kind: Kustomization
|
||||
images:
|
||||
- name: controller
|
||||
newName: clastix/kamaji
|
||||
newTag: v0.2.2
|
||||
newTag: v0.5.1
|
||||
|
||||
@@ -30,6 +30,7 @@ spec:
|
||||
args:
|
||||
- manager
|
||||
- --leader-elect
|
||||
- --datastore=kamaji-etcd
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
|
||||
10
config/metadata.yaml
Normal file
@@ -0,0 +1,10 @@
# maps release series of major.minor to cluster-api contract version
# the contract version may change between minor or major versions, but *not*
# between patch versions.
#
# update this file only when a new major or minor version is released
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
releaseSeries:
  - major: 0
    minor: 3
    contract: v1beta1
@@ -2,7 +2,6 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: manager-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
|
||||
@@ -1,22 +1,24 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: test
|
||||
name: k8s-126
|
||||
labels:
|
||||
tenant.clastix.io: k8s-126
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 1
|
||||
replicas: 2
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
kubernetes:
|
||||
version: "v1.25.4"
|
||||
version: "v1.26.0"
|
||||
kubelet:
|
||||
cgroupfs: cgroupfs
|
||||
admissionControllers:
|
||||
- ResourceQuota
|
||||
- LimitRanger
|
||||
cgroupfs: systemd
|
||||
networkProfile:
|
||||
port: 6443
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
konnectivity:
|
||||
server:
|
||||
port: 8132
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: additionalcontainers
|
||||
labels:
|
||||
tenant.clastix.io: additionalcontainers
|
||||
spec:
|
||||
dataStore: postgresql-bronze
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 1
|
||||
additionalInitContainers:
|
||||
- name: init
|
||||
image: registry.k8s.io/e2e-test-images/busybox:1.29-4
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- echo hello world
|
||||
additionalContainers:
|
||||
- name: nginx
|
||||
image: registry.k8s.io/e2e-test-images/nginx:1.15-4
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
kubernetes:
|
||||
version: "v1.26.0"
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
networkProfile:
|
||||
port: 6443
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
@@ -0,0 +1,62 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: additional-volumes
|
||||
labels:
|
||||
tenant.clastix.io: additional-volumes
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 1
|
||||
additionalVolumes:
|
||||
- name: api-server-volume
|
||||
configMap:
|
||||
name: api-server-extra-cm
|
||||
- name: controller-manager-volume
|
||||
configMap:
|
||||
name: controller-manager-extra-cm
|
||||
- name: scheduler-volume
|
||||
configMap:
|
||||
name: scheduler-extra-cm
|
||||
additionalVolumeMounts:
|
||||
apiServer:
|
||||
- name: api-server-volume
|
||||
mountPath: "/tmp/api-server"
|
||||
controllerManager:
|
||||
- name: controller-manager-volume
|
||||
mountPath: "/tmp/controller-manager"
|
||||
scheduler:
|
||||
- name: scheduler-volume
|
||||
mountPath: "/tmp/scheduler"
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
kubernetes:
|
||||
version: "v1.26.0"
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
networkProfile:
|
||||
port: 6443
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
api-server: "This is an API Server volume"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: api-server-extra-cm
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
controller-manager: "This is a Controller Manager volume"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: controller-manager-extra-cm
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
controller-manager: "This is a Scheduler volume"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: scheduler-extra-cm
|
||||
20
config/samples/kamaji_v1alpha1_tenantcontrolplane_kine.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: kine
|
||||
labels:
|
||||
tenant.clastix.io: kine
|
||||
spec:
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 1
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
dataStore: postgresql-bronze
|
||||
kubernetes:
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
version: v1.26.0
|
||||
@@ -0,0 +1,23 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: konnectivity-addon
|
||||
labels:
|
||||
tenant.clastix.io: konnectivity-addon
|
||||
spec:
|
||||
deployment:
|
||||
replicas: 2
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
kubernetes:
|
||||
version: "v1.26.0"
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
networkProfile:
|
||||
port: 6443
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
konnectivity:
|
||||
server:
|
||||
port: 8132
|
||||
@@ -2,29 +2,8 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /mutate-kamaji-clastix-io-v1alpha1-datastore
|
||||
failurePolicy: Fail
|
||||
name: mdatastore.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
@@ -49,28 +28,8 @@ webhooks:
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
@@ -92,6 +51,25 @@ webhooks:
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
|
||||
10
controllers/cert_channel.go
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
)
|
||||
|
||||
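// CertificateChannel conveys generic events for TenantControlPlane objects whose
// certificates must be rotated, connecting the CertificateLifecycle controller
// to the TenantControlPlane reconciler.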
type CertificateChannel chan event.GenericEvent
|
||||
163
controllers/certificate_lifecycle_controller.go
Normal file
@@ -0,0 +1,163 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/constants"
|
||||
"github.com/clastix/kamaji/internal/crypto"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
)
|
||||
|
||||
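// CertificateLifecycle watches the certificate and kubeconfig Secrets generated for a
// TenantControlPlane and, when an embedded certificate approaches expiration, sends a
// generic event for the owning TenantControlPlane over Channel to trigger rotation.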
type CertificateLifecycle struct {
|
||||
Channel CertificateChannel
|
||||
client client.Client
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
logger.Info("starting CertificateLifecycle handling")
|
||||
|
||||
secret := corev1.Secret{}
|
||||
err := s.client.Get(ctx, request.NamespacedName, &secret)
|
||||
if k8serrors.IsNotFound(err) {
|
||||
logger.Info("resource have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
checkType, ok := secret.GetLabels()[constants.ControllerLabelResource]
|
||||
if !ok {
|
||||
logger.Info("missing controller label, shouldn't happen")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var crt *x509.Certificate
|
||||
|
||||
switch checkType {
|
||||
case "x509":
|
||||
crt, err = s.extractCertificateFromBareSecret(secret)
|
||||
case "kubeconfig":
|
||||
crt, err = s.extractCertificateFromKubeconfig(secret)
|
||||
default:
|
||||
err = fmt.Errorf("unsupported strategy, %s", checkType)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.Error(err, "skipping reconciliation")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
deadline := time.Now().AddDate(0, 0, 1)
|
||||
|
||||
if deadline.After(crt.NotAfter) {
|
||||
logger.Info("certificate near expiration, must be rotated")
|
||||
|
||||
s.Channel <- event.GenericEvent{Object: &kamajiv1alpha1.TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secret.GetOwnerReferences()[0].Name,
|
||||
Namespace: secret.Namespace,
|
||||
},
|
||||
}}
|
||||
|
||||
logger.Info("certificate rotation triggered")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
after := crt.NotAfter.Sub(deadline)
|
||||
|
||||
logger.Info("certificate is still valid, enqueuing back", "after", after.String())
|
||||
|
||||
return reconcile.Result{Requeue: true, RequeueAfter: after}, nil
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) extractCertificateFromBareSecret(secret corev1.Secret) (*x509.Certificate, error) {
|
||||
var crt *x509.Certificate
|
||||
var err error
|
||||
|
||||
for _, v := range secret.Data {
|
||||
if crt, err = crypto.ParseCertificateBytes(v); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if crt == nil {
|
||||
return nil, fmt.Errorf("none of the provided keys is containing a valid x509 certificate")
|
||||
}
|
||||
|
||||
return crt, nil
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) extractCertificateFromKubeconfig(secret corev1.Secret) (*x509.Certificate, error) {
|
||||
var kc *clientcmdapiv1.Config
|
||||
var err error
|
||||
|
||||
for k := range secret.Data {
|
||||
if kc, err = utilities.DecodeKubeconfig(secret, k); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if kc == nil {
|
||||
return nil, fmt.Errorf("none of the provided keys is containing a valid kubeconfig")
|
||||
}
|
||||
|
||||
crt, err := crypto.ParseCertificateBytes(kc.AuthInfos[0].AuthInfo.ClientCertificateData)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot parse kubeconfig certificate bytes")
|
||||
}
|
||||
|
||||
return crt, nil
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
s.client = mgr.GetClient()
|
||||
|
||||
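// Watch only Secrets carrying the Kamaji controller label with a supported certificate check strategy.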
supportedStrategies := sets.New[string]("x509", "kubeconfig")
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Secret{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
labels := object.GetLabels()
|
||||
|
||||
if labels == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
value, ok := labels[constants.ControllerLabelResource]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return supportedStrategies.Has(value)
|
||||
}))).
|
||||
Complete(s)
|
||||
}
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
type DataStore struct {
|
||||
client client.Client
|
||||
Client client.Client
|
||||
// TenantControlPlaneTrigger is the channel used to communicate across the controllers:
|
||||
// if a Data Source is updated we have to be sure that the reconciliation of the certificates content
|
||||
// for each Tenant Control Plane is put in place properly.
|
||||
@@ -39,19 +39,21 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
ds := &kamajiv1alpha1.DataStore{}
|
||||
if err := r.client.Get(ctx, request.NamespacedName, ds); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
err := r.Client.Get(ctx, request.NamespacedName, ds)
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("resource have been deleted, skipping")
|
||||
|
||||
log.Error(err, "unable to retrieve the request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
tcpList := kamajiv1alpha1.TenantControlPlaneList{}
|
||||
|
||||
if err := r.client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
if err := r.Client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
|
||||
}); err != nil {
|
||||
log.Error(err, "cannot retrieve list of the Tenant Control Plane using the following instance")
|
||||
@@ -66,7 +68,7 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
|
||||
ds.Status.UsedBy = tcpSets.List()
|
||||
|
||||
if err := r.client.Status().Update(ctx, ds); err != nil {
|
||||
if err := r.Client.Status().Update(ctx, ds); err != nil {
|
||||
log.Error(err, "cannot update the status for the given instance")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
@@ -81,12 +83,6 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *DataStore) InjectClient(client client.Client) error {
|
||||
r.client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
enqueueFn := func(tcp *kamajiv1alpha1.TenantControlPlane, limitingInterface workqueue.RateLimitingInterface) {
|
||||
if dataStoreName := tcp.Status.Storage.DataStoreName; len(dataStoreName) > 0 {
|
||||
@@ -102,15 +98,15 @@ func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(
|
||||
predicate.ResourceVersionChangedPredicate{},
|
||||
)).
|
||||
Watches(&source.Kind{Type: &kamajiv1alpha1.TenantControlPlane{}}, handler.Funcs{
|
||||
CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &kamajiv1alpha1.TenantControlPlane{}), handler.Funcs{
|
||||
CreateFunc: func(_ context.Context, createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(createEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
UpdateFunc: func(_ context.Context, updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(updateEvent.ObjectOld.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
enqueueFn(updateEvent.ObjectNew.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
DeleteFunc: func(_ context.Context, deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(deleteEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
}).
|
||||
|
||||
@@ -5,6 +5,7 @@ package finalizers
|
||||
|
||||
const (
|
||||
// DatastoreFinalizer is using a wrong name, since it's related to the underlying datastore.
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
DatastoreSecretFinalizer = "finalizer.kamaji.clastix.io/datastore-secret"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
)
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/controllers/finalizers"
|
||||
builder "github.com/clastix/kamaji/internal/builders/controlplane"
|
||||
"github.com/clastix/kamaji/internal/datastore"
|
||||
"github.com/clastix/kamaji/internal/resources"
|
||||
ds "github.com/clastix/kamaji/internal/resources/datastore"
|
||||
@@ -39,6 +40,7 @@ type GroupDeletableResourceBuilderConfiguration struct {
|
||||
tcpReconcilerConfig TenantControlPlaneReconcilerConfig
|
||||
tenantControlPlane kamajiv1alpha1.TenantControlPlane
|
||||
connection datastore.Connection
|
||||
dataStore kamajiv1alpha1.DataStore
|
||||
}
|
||||
|
||||
// GetResources returns a list of resources that will be used to provide tenant control planes
|
||||
@@ -59,6 +61,11 @@ func GetDeletableResources(tcp *kamajiv1alpha1.TenantControlPlane, config GroupD
|
||||
Client: config.client,
|
||||
Connection: config.connection,
|
||||
})
|
||||
res = append(res, &ds.Config{
|
||||
Client: config.client,
|
||||
ConnString: config.connection.GetConnectionString(),
|
||||
DataStore: config.dataStore,
|
||||
})
|
||||
}
|
||||
|
||||
return res
|
||||
@@ -175,6 +182,12 @@ func getKubeconfigResources(c client.Client, tcpReconcilerConfig TenantControlPl
|
||||
KubeConfigFileName: resources.AdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "admin-kubeconfig",
|
||||
Client: c,
|
||||
KubeConfigFileName: resources.SuperAdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "controller-manager-kubeconfig",
|
||||
Client: c,
|
||||
@@ -245,7 +258,7 @@ func getKonnectivityServerRequirementsResources(c client.Client) []resources.Res
|
||||
|
||||
func getKonnectivityServerPatchResources(c client.Client) []resources.Resource {
|
||||
return []resources.Resource{
|
||||
&konnectivity.KubernetesDeploymentResource{Client: c},
|
||||
&konnectivity.KubernetesDeploymentResource{Builder: builder.Konnectivity{Scheme: *c.Scheme()}, Client: c},
|
||||
&konnectivity.ServiceResource{Client: c},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ type CoreDNS struct {
|
||||
TriggerChannel chan event.GenericEvent
|
||||
}
|
||||
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := c.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
c.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
@@ -79,7 +79,7 @@ func (c *CoreDNS) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.CoreDNSClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&rbacv1.ClusterRole{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&corev1.Service{}).
|
||||
|
||||
@@ -80,7 +80,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&appsv1.DaemonSet{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace
|
||||
}))).
|
||||
Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &corev1.ServiceAccount{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -94,7 +94,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Kind{Type: &v1.ClusterRoleBinding{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &v1.ClusterRoleBinding{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.CertCommonName {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -107,6 +107,6 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -67,6 +67,6 @@ func (k *KubeadmPhase) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(k.Phase.GetWatchedObject(), builder.WithPredicates(predicate.NewPredicateFuncs(k.Phase.GetPredicateFunc()))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ func (k *KubeProxy) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.KubeProxyClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&rbacv1.Role{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -84,7 +84,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "leases.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -128,7 +128,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "catchall.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -187,7 +187,7 @@ func (m *Migrate) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return object.GetName() == vwc.GetName()
|
||||
}))).
|
||||
Watches(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(m)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,13 +12,13 @@ import (
|
||||
"k8s.io/client-go/util/retry"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
@@ -175,10 +175,12 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
}()
|
||||
|
||||
mgr, err := controllerruntime.NewManager(tcpRest, controllerruntime.Options{
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
MetricsBindAddress: "0",
|
||||
NewClient: func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: "0",
|
||||
},
|
||||
NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
|
||||
return client.New(config, client.Options{
|
||||
Scheme: m.client.Scheme(),
|
||||
})
|
||||
@@ -256,6 +258,17 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
if err = bootstrapToken.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
kubeadmRbac := &controllers.KubeadmPhase{
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Phase: &resources.KubeadmPhase{
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseClusterAdminRBAC,
|
||||
},
|
||||
}
|
||||
if err = kubeadmRbac.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Starting the manager
|
||||
go func() {
|
||||
if err = mgr.Start(tcpCtx); err != nil {
|
||||
@@ -289,7 +302,7 @@ func (m *Manager) SetupWithManager(mgr manager.Manager) error {
|
||||
m.sootMap = make(map[string]sootItem)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
For(&kamajiv1alpha1.TenantControlPlane{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
obj := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
|
||||
// status is required to understand if we have to start or stop the soot manager
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apimachineryerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/utils/clock"
|
||||
@@ -50,12 +50,17 @@ type TenantControlPlaneReconciler struct {
|
||||
KamajiService string
|
||||
KamajiMigrateImage string
|
||||
MaxConcurrentReconciles int
|
||||
// CertificateChan is the channel used by the CertificateLifecycleController that is checking for
|
||||
// certificates and kubeconfig user certs validity: a generic event for the given TCP will be triggered
|
||||
// once the validity threshold for the given certificate is reached.
|
||||
CertificateChan CertificateChannel
|
||||
|
||||
clock mutex.Clock
|
||||
}
|
||||
|
||||
// TenantControlPlaneReconcilerConfig gives the necessary configuration for TenantControlPlaneReconciler.
|
||||
type TenantControlPlaneReconcilerConfig struct {
|
||||
ReconcileTimeout time.Duration
|
||||
DefaultDataStoreName string
|
||||
KineContainerImage string
|
||||
TmpBaseDirectory string
|
||||
@@ -74,17 +79,20 @@ type TenantControlPlaneReconcilerConfig struct {
|
||||
func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
var cancelFn context.CancelFunc
|
||||
ctx, cancelFn = context.WithTimeout(ctx, r.Config.ReconcileTimeout)
|
||||
defer cancelFn()
|
||||
|
||||
tenantControlPlane, err := r.getTenantControlPlane(ctx, req.NamespacedName)()
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("resource have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
if apimachineryerrors.IsNotFound(err) {
|
||||
log.Info("resource may have been deleted, skipping")
|
||||
log.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "cannot retrieve the required instance")
|
||||
|
||||
return ctrl.Result{}, err
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
releaser, err := mutex.Acquire(r.mutexSpec(tenantControlPlane))
|
||||
@@ -136,6 +144,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
tcpReconcilerConfig: r.Config,
|
||||
tenantControlPlane: *tenantControlPlane,
|
||||
connection: dsConnection,
|
||||
dataStore: *ds,
|
||||
}
|
||||
|
||||
for _, resource := range GetDeletableResources(tenantControlPlane, groupDeletableResourceBuilderConfiguration) {
|
||||
@@ -218,7 +227,15 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
r.clock = clock.RealClock{}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(&source.Channel{Source: r.CertificateChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
Name: genericEvent.Object.GetName(),
|
||||
},
|
||||
})
|
||||
}}).
|
||||
WatchesRawSource(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
@@ -232,7 +249,7 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
Owns(&appsv1.Deployment{}).
|
||||
Owns(&corev1.Service{}).
|
||||
Owns(&networkingv1.Ingress{}).
|
||||
Watches(&source.Kind{Type: &batchv1.Job{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &batchv1.Job{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
labels := object.GetLabels()
|
||||
|
||||
name, namespace := labels["tcp.kamaji.clastix.io/name"], labels["tcp.kamaji.clastix.io/namespace"]
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
KUBERNETES_VERSION=$1; shift
|
||||
HOSTS=("$@")
|
||||
|
||||
# Install `containerd` as container runtime.
|
||||
cat << EOF | tee containerd.conf
|
||||
overlay
|
||||
br_netfilter
|
||||
EOF
|
||||
|
||||
cat << EOF | tee 99-kubernetes-cri.conf
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
EOF
|
||||
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update && sudo apt install -y containerd'
|
||||
ssh ${USER}@${HOST} -t 'sudo mkdir -p /etc/containerd'
|
||||
ssh ${USER}@${HOST} -t 'containerd config default | sed -e "s#SystemdCgroup = false#SystemdCgroup = true#g" | sudo tee -a /etc/containerd/config.toml'
|
||||
ssh ${USER}@${HOST} -t 'sudo systemctl restart containerd && sudo systemctl enable containerd'
|
||||
scp containerd.conf ${USER}@${HOST}:
|
||||
ssh ${USER}@${HOST} -t 'sudo chown -R root:root containerd.conf && sudo mv containerd.conf /etc/modules-load.d/containerd.conf'
|
||||
ssh ${USER}@${HOST} -t 'sudo modprobe overlay && sudo modprobe br_netfilter'
|
||||
scp 99-kubernetes-cri.conf ${USER}@${HOST}:
|
||||
ssh ${USER}@${HOST} -t 'sudo chown -R root:root 99-kubernetes-cri.conf && sudo mv 99-kubernetes-cri.conf /etc/sysctl.d/99-kubernetes-cri.conf'
|
||||
ssh ${USER}@${HOST} -t 'sudo sysctl --system'
|
||||
done
|
||||
|
||||
rm -f containerd.conf 99-kubernetes-cri.conf
|
||||
|
||||
# Install `kubectl`, `kubelet`, and `kubeadm` in the desired version.
|
||||
|
||||
INSTALL_KUBERNETES="sudo apt install -y kubelet=${KUBERNETES_VERSION}-00 kubeadm=${KUBERNETES_VERSION}-00 kubectl=${KUBERNETES_VERSION}-00 --allow-downgrades --allow-change-held-packages"
|
||||
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update'
|
||||
ssh ${USER}@${HOST} -t 'sudo apt install -y apt-transport-https ca-certificates curl'
|
||||
ssh ${USER}@${HOST} -t 'sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg'
|
||||
ssh ${USER}@${HOST} -t 'echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list'
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update'
|
||||
ssh ${USER}@${HOST} -t ${INSTALL_KUBERNETES}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt-mark hold kubelet kubeadm kubectl'
|
||||
done
|
||||
@@ -1,32 +0,0 @@
|
||||
#cloud-config
|
||||
package_upgrade: true
|
||||
packages:
|
||||
- containerd
|
||||
- apt-transport-https
|
||||
- ca-certificates
|
||||
- curl
|
||||
write_files:
|
||||
- owner: root:root
|
||||
path: /etc/modules-load.d/containerd.conf
|
||||
content: |
|
||||
overlay
|
||||
br_netfilter
|
||||
- owner: root:root
|
||||
path: /etc/sysctl.d/99-kubernetes-cri.conf
|
||||
content: |
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
runcmd:
|
||||
- sudo modprobe overlay
|
||||
- sudo modprobe br_netfilter
|
||||
- sudo sysctl --system
|
||||
- sudo mkdir -p /etc/containerd
|
||||
- containerd config default | sed -e 's#SystemdCgroup = false#SystemdCgroup = true#g' | sudo tee -a /etc/containerd/config.toml
|
||||
- sudo systemctl restart containerd
|
||||
- sudo systemctl enable containerd
|
||||
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
|
||||
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
|
||||
- sudo apt update
|
||||
- sudo apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
|
||||
- sudo apt-mark hold kubelet kubeadm kubectl containerd
|
||||
@@ -1,37 +1,41 @@
|
||||
# Concepts
|
||||
|
||||
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
|
||||
**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_.
|
||||
|
||||
These are the requirements behind Kamaji's design:
|
||||
|
||||
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
|
||||
- Communication between different _“tenant clusters”_ is not allowed.
|
||||
- Communication between the _“Management Cluster”_ and a _“Tenant Cluster”_ is unidirectional. The _“Management Cluster”_ manages a _“Tenant Cluster”_, but a _“Tenant Cluster”_ has no awareness of the _“Management Cluster”_.
|
||||
- Communication between different _“Tenant Clusters”_ is not allowed.
|
||||
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
|
||||
|
||||
Goals and scope may vary as the project evolves.
|
||||
|
||||
## Tenant Control Plane
|
||||
Kamaji is special because the Control Planes of the _“tenant cluster”_ are regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
|
||||
Kamaji is special because the Control Planes of the _“Tenant Clusters”_ are regular pods running in a namespace of the _“Management Cluster”_ instead of dedicated machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own servers. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
|
||||
|
||||
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on the metrics is available. A Service is used to espose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used, `NodePort` and `ClusterIP` are other viable options, depending on the case.
|
||||
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“Management Cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` are other viable options, depending on the case.
|
||||
|
||||
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` for short.
|
||||
|
||||
All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
|
||||
All the _“Tenant Clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
|
||||
|
||||
## Tenant worker nodes
|
||||
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the APIs server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how to add another tenant cluster to Kamaji? As you could expect, you have just deploys a new Tenant Control Plane in one of the _“admin cluster”_ namespace, and then joins the tenant worker nodes to it.
|
||||
|
||||
We have in roadmap, the Cluster APIs support as well as a Terraform provider so that you can create _“tenant clusters”_ in a declarative way.
|
||||
And what about the tenant worker nodes?
|
||||
They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the APIs server of the Tenant Control Plane.
|
||||
Kamaji's goal is to manage the lifecycle of hundreds of these _“Tenant Clusters”_, not only one, so how do you add another Tenant Cluster to Kamaji?
|
||||
As you might expect, you just deploy a new Tenant Control Plane in one of the _“Management Cluster”_ namespaces, and then join the tenant worker nodes to it.
|
||||
|
||||
A [Cluster API ControlPlane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been released, offering a Cluster API-native declarative lifecycle by automating the worker nodes' join.
|
||||
|
||||
## Datastores
|
||||
Putting the Tenant Control Plane in a pod is the easiest part. Also, we have to make sure each tenant cluster saves the state to be able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the admin cluster, you can deploy one or multi-tenant `etcd` to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing multiple datastores. By sharing the datastore between multiple tenants, the resiliency is still guaranteed and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that you have to operate external datastores, in addition to `etcd` of the _“admin cluster”_ and manage the access to be sure that each _“tenant cluster”_ uses only its data.
|
||||
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each Tenant Cluster can save and retrieve its state. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the Management Cluster, you can deploy one or more multi-tenant `etcd` clusters to save the state of multiple Tenant Clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach to managing multiple datastores, as sketched below. By sharing the datastore between multiple tenants, resiliency is still guaranteed and the pod count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that you have to operate external datastores, in addition to the `etcd` of the _“Management Cluster”_, and manage access to be sure that each _“Tenant Cluster”_ uses only its own data.
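For illustration only, a minimal `DataStore` sketch is shown here; the field names are assumptions based on the `datastore.*` Helm values used elsewhere in this documentation (driver, endpoints, basicAuth, tlsConfig) and should be checked against the actual CRD:

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: etcd-bronze              # illustrative name
spec:
  driver: etcd                   # etcd, MySQL, or PostgreSQL (the latter via kine)
  endpoints:
    - etcd-0.kamaji-system.svc.cluster.local:2379
  # basicAuth and tlsConfig reference Secrets holding credentials and
  # certificates, mirroring the datastore.* Helm values; omitted here.
```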
|
||||
|
||||
### Other storage drivers
|
||||
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as datastore for _“tenant clusters”_.
|
||||
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple Tenant Clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“Tenant Clusters”_.
|
||||
|
||||
### Pooling
|
||||
By default, Kamaji is expecting to persist all the _“tenant clusters”_ data in a unique datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that could have different resources assigned or a different tiering. Pooling of multiple datastore is an option you can leverage for a very large set of _“tenant clusters”_ so you can distribute the load properly. As future improvements, we have a _datastore scheduler_ feature in roadmap so that Kamaji itself can assign automatically a _“tenant cluster”_ to the best datastore in the pool.
|
||||
By default, Kamaji expects to persist all the _“Tenant Clusters”_ data in a single datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“Tenant Clusters”_ that could have different resources assigned or a different tiering. Pooling of multiple datastores is an option you can leverage for a very large set of _“Tenant Clusters”_, so you can distribute the load properly; see the sketch below. As a future improvement, we have a _datastore scheduler_ feature on the roadmap so that Kamaji itself can automatically assign a _“Tenant Cluster”_ to the best datastore in the pool.
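As a sketch, reusing the `postgresql-bronze` datastore name from the samples in this repository (the Tenant Control Plane name is illustrative), a Tenant Cluster is pinned to a specific datastore of the pool through the `dataStore` field of its spec:

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-bronze            # illustrative name
spec:
  # Selects which DataStore of the pool backs this Tenant Cluster's state.
  dataStore: postgresql-bronze
  controlPlane:
    deployment:
      replicas: 2
    service:
      serviceType: LoadBalancer
  kubernetes:
    version: "v1.26.0"
    kubelet:
      cgroupfs: systemd
  addons:
    coreDNS: {}
    kubeProxy: {}
```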
|
||||
|
||||
### Migration
|
||||
In order to simplify Day 2 operations and reduce the operational burden, Kamaji provides the capability to live-migrate data from one datastore to another of the same driver, without manual and error-prone backup and restore operations.
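Assuming the declarative mechanism exposed by recent Kamaji releases, the migration is requested by pointing the `dataStore` field of an existing Tenant Control Plane at a target datastore of the same driver, for example:

```yaml
# Sketch: updating spec.dataStore on an existing TenantControlPlane asks Kamaji
# to live-migrate its data; "postgresql-silver" is a hypothetical target
# DataStore using the same driver as the current one.
spec:
  dataStore: postgresql-silver
```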
|
||||
|
||||
@@ -41,11 +41,7 @@ Please, split changes into several and documented small commits: this will help
|
||||
## Code convention
|
||||
|
||||
Kamaji is written in Golang. The changes must follow the Pull Request method where a _GitHub Action_ will
|
||||
check the `golangci-lint`, so ensure your changes respect the coding standard.
|
||||
|
||||
### golint
|
||||
|
||||
You can easily check them issuing the _Make_ recipe `golint`.
|
||||
run `golangci-lint`, so ensure your changes respect the coding standard. You can easily check this by issuing the _Make_ recipe `golint`.
|
||||
|
||||
```
|
||||
# make golint
|
||||
@@ -54,10 +50,10 @@ golangci-lint run -c .golangci.yml
|
||||
|
||||
> Enabled linters and related options are defined in the [.golangci.yml file](https://github.com/clastix/Kamaji/blob/master/.golangci.yml)
|
||||
|
||||
Please, add a new single line at end of any file as the current coding style.
|
||||
|
||||
## Finding contributions to work on
|
||||
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the
|
||||
default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted'
|
||||
and 'good first issue' issues are a great place to start.
|
||||
Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' and 'good first issue' issues is a great place to start.
|
||||
|
||||
## Design Docs
|
||||
|
||||
@@ -74,10 +70,31 @@ When filing an issue, please check existing open, or recently closed, issues to
|
||||
* Any modifications you've made relevant to the bug
|
||||
* Anything unusual about your environment or deployment
|
||||
|
||||
## Miscellanea
|
||||
## Governance
|
||||
|
||||
This document lays out the guidelines under which the Kamaji project will be governed.
|
||||
The goal is to make sure that the roles and responsibilities are well defined and to clarify how decisions are made.
|
||||
|
||||
### Roles
|
||||
|
||||
In the context of the Kamaji project, we consider the following roles:
|
||||
|
||||
* __Users__: everyone using Kamaji, typically willing to provide feedback by proposing features and/or filing issues.
|
||||
|
||||
* __Contributors__: everyone contributing code, documentation, examples, tests, and participating in feature proposals as well as design discussions.
|
||||
|
||||
* __Maintainers__: responsible for engaging with and assisting contributors to iterate on contributions until they reach acceptable quality. Maintainers can decide whether the contributions can be accepted into the project or rejected.
|
||||
|
||||
### Release Management
|
||||
|
||||
The release process will be governed by Maintainers.
|
||||
|
||||
### Roadmap Planning
|
||||
|
||||
Maintainers will share roadmap and release versions as milestones in GitHub [project's page](https://github.com/clastix/kamaji).
|
||||
|
||||
Please add a single newline at the end of every file, as per the current coding style.
|
||||
|
||||
## Licensing
|
||||
|
||||
See the [LICENSE](https://github.com/clastix/Kamaji/blob/master/LICENSE) file for our project's licensing. We can ask you to confirm the licensing of your contribution.
|
||||
See the [LICENSE](https://github.com/clastix/Kamaji/blob/master/LICENSE) file for our project's licensing. We can ask you to confirm the licensing of your contribution.
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
# Governance
|
||||
|
||||
This document lays out the guidelines under which the Kamaji project will be governed.
|
||||
The goal is to make sure that the roles and responsibilities are well defined and clarify how decisions are made.
|
||||
|
||||
## Roles
|
||||
|
||||
In the context of Kamaji project, we consider the following roles:
|
||||
|
||||
* __Users__: everyone using Kamaji, typically willing to provide feedback by proposing features and/or filing issues.
|
||||
|
||||
* __Contributors__: everyone contributing code, documentation, examples, tests, and participating in feature proposals as well as design discussions.
|
||||
|
||||
* __Maintainers__: are responsible for engaging with and assisting contributors to iterate on the contributions until it reaches acceptable quality. Maintainers can decide whether the contributions can be accepted into the project or rejected.
|
||||
|
||||
## Release Management
|
||||
|
||||
The release process will be governed by Maintainers.
|
||||
|
||||
## Roadmap Planning
|
||||
|
||||
Maintainers will share roadmap and release versions as milestones in GitHub [project's page](https://github.com/clastix/kamaji).
|
||||
@@ -1,2 +0,0 @@
|
||||
# Guidelines
|
||||
Guidelines for community contributions.
|
||||
@@ -1,191 +1,389 @@
|
||||
# Getting started
|
||||
# Getting started with Kamaji
|
||||
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure.
|
||||
|
||||
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development scopes. Please refer to the [Kamaji documentation](concepts.md) for understanding all the terms used in this guide, as for example: `admin cluster`, `tenant cluster`, and `tenant control plane`.
|
||||
!!! warning ""
|
||||
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
|
||||
|
||||
## Pre-requisites
|
||||
The guide requires:
|
||||
|
||||
We assume you have installed on your workstation:
|
||||
- a bootstrap machine
|
||||
- a Kubernetes cluster to run the Admin and Tenant Control Planes
|
||||
- an arbitrary number of machines to host `Tenant`s' workloads
|
||||
|
||||
## Summary
|
||||
|
||||
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
|
||||
* [Access Management Cluster](#access-management-cluster)
|
||||
* [Install Cert Manager](#install-cert-manager)
|
||||
* [Install Kamaji controller](#install-kamaji-controller)
|
||||
* [Create Tenant Cluster](#create-tenant-cluster)
|
||||
* [Cleanup](#cleanup)
|
||||
|
||||
## Prepare the bootstrap workspace
|
||||
On the bootstrap machine, clone the repo and prepare the workspace directory:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/clastix/kamaji
|
||||
cd kamaji/deploy
|
||||
```
|
||||
|
||||
We assume the following tools are installed on the bootstrap workstation:
|
||||
|
||||
- [Docker](https://docker.com)
|
||||
- [KinD](https://kind.sigs.k8s.io/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
|
||||
- [Helm](https://helm.sh/docs/intro/install/)
|
||||
- [helm](https://helm.sh/docs/intro/install/)
|
||||
- [jq](https://stedolan.github.io/jq/)
|
||||
- [openssl](https://www.openssl.org/)
|
||||
- [cfssl/cfssljson](https://github.com/cloudflare/cfssl)
|
||||
|
||||
## Access Management Cluster
|
||||
In Kamaji, the Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant Clusters, as it hosts monitoring, logging, and governance of the Kamaji setup, including all Tenant Clusters.
|
||||
|
||||
> Starting from Kamaji v0.1.0, `kubectl` and `kubeadm` need to be at least version `v1.25.0` due to the changes regarding the `kubelet-config` ConfigMap required for the node join.
|
||||
|
||||
## Setup Kamaji on KinD
|
||||
|
||||
The instance of Kamaji is made of a single node hosting:
|
||||
|
||||
- admin control-plane
|
||||
- admin worker
|
||||
- multi-tenant datastore
|
||||
|
||||
### Standard Installation
|
||||
|
||||
You can install your KinD cluster, an `etcd` based multi-tenant datastore and the Kamaji operator with a **single command**:
|
||||
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your environment:
|
||||
|
||||
```bash
|
||||
$ make -C deploy/kind
|
||||
source kamaji.env
|
||||
```
|
||||
|
||||
Now you can deploy a [`TenantControlPlane`](#deploy-tenant-control-plane).
|
||||
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the Management Cluster should provide:
|
||||
|
||||
### Installation with alternative datastore drivers
|
||||
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
|
||||
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
|
||||
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or a Cloud based controller.
|
||||
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).
|
||||
|
||||
Kamaji offers the possibility of using a different storage system than `etcd` for datastore, like `MySQL` or `PostgreSQL` compatible databases.
|
||||
|
||||
First, setup a KinD cluster and the other requirements:
|
||||
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into a Kamaji Management Cluster, and check that you can access it:
|
||||
|
||||
```bash
|
||||
$ make -C deploy/kind reqs
|
||||
kubectl cluster-info
|
||||
```
|
||||
|
||||
Install one of the alternative supported databases:
|
||||
## Install Cert Manager
|
||||
|
||||
- **MySQL** install it with command:
|
||||
|
||||
`$ make -C deploy/kine/mysql mariadb`
|
||||
|
||||
- **PostgreSQL** install it with command:
|
||||
|
||||
`$ make -C deploy/kine/postgresql postgresql`
|
||||
|
||||
Then use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
|
||||
|
||||
For example, with a PostreSQL datastore:
|
||||
Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by TLS, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be installed:
|
||||
|
||||
```bash
|
||||
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
|
||||
--set etcd.deploy=false \
|
||||
--set datastore.driver=PostgreSQL \
|
||||
--set datastore.endpoints[0]=postgres-default-rw.kamaji-system.svc:5432 \
|
||||
--set datastore.basicAuth.usernameSecret.name=postgres-default-superuser \
|
||||
--set datastore.basicAuth.usernameSecret.namespace=kamaji-system \
|
||||
--set datastore.basicAuth.usernameSecret.keyPath=username \
|
||||
--set datastore.basicAuth.passwordSecret.name=postgres-default-superuser \
|
||||
--set datastore.basicAuth.passwordSecret.namespace=kamaji-system \
|
||||
--set datastore.basicAuth.passwordSecret.keyPath=password \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.name=postgres-default-ca \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.keyPath=ca.crt \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.name=postgres-default-ca \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.keyPath=ca.key \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.name=postgres-default-root-cert \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.keyPath=tls.crt \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.name=postgres-default-root-cert \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.11.0 \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
### Deploy Tenant Control Plane
|
||||
## Install Kamaji Controller
|
||||
|
||||
Now it is the moment of deploying your first tenant control plane.
|
||||
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a datastore in order to save the data of the Tenant Clusters. The Kamaji Helm chart provides the installation of a basic unmanaged `etcd` as datastore, out of the box.
|
||||
|
||||
Install Kamaji with `helm` using an unmanaged `etcd` as the default datastore:
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f - <<EOF
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
```
|
||||
|
||||
!!! note "A managed datastore is highly recommended in production"
|
||||
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to setup a multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
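
If you want to try a managed datastore right away, a minimal sketch is installing `kamaji-etcd` from the same Helm repository; this assumes the `clastix/kamaji-etcd` chart is published there and that its default values fit your cluster:

```bash
# Sketch: install a managed, three-replica etcd datastore (chart name assumed)
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```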
|
||||
|
||||
Now you should end up with a working Kamaji instance, including the default `datastore`:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
etcd-0 1/1 Running 0 50s
|
||||
etcd-1 1/1 Running 0 60s
|
||||
etcd-2 1/1 Running 0 90s
|
||||
kamaji-7949578bfb-lj44p 1/1 Running 0 12s
|
||||
```
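
If any pod is not running, a quick look at the controller logs usually explains why (a sketch; the Deployment name follows the Helm release name `kamaji` used above):

```bash
# Inspect the Kamaji controller logs for errors
kubectl -n kamaji-system logs deploy/kamaji --tail=50
```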
|
||||
|
||||
> A first installation could fail for several reasons, such as a missing `StorageClass`, or even a trivial `Ctrl+C` during the installation phase.
>
> See the [Cleanup](#cleanup) section before retrying an aborted installation.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
### Tenant Control Plane
|
||||
|
||||
An example Tenant Control Plane looks like the following:
|
||||
|
||||
```yaml
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: ${TENANT_NAME}
  namespace: ${TENANT_NAMESPACE}
  labels:
    tenant.clastix.io: ${TENANT_NAME}
spec:
  dataStore: default
  controlPlane:
    deployment:
      replicas: 3
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
      extraArgs:
        apiServer: []
        controllerManager: []
        scheduler: []
      resources:
        apiServer:
          requests:
            cpu: 250m
            memory: 512Mi
          limits: {}
        controllerManager:
          requests:
            cpu: 125m
            memory: 256Mi
          limits: {}
        scheduler:
          requests:
            cpu: 125m
            memory: 256Mi
          limits: {}
    service:
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
      serviceType: LoadBalancer
  kubernetes:
    version: ${TENANT_VERSION}
    kubelet:
      cgroupfs: systemd
    admissionControllers:
      - ResourceQuota
      - LimitRanger
  networkProfile:
    port: ${TENANT_PORT}
    certSANs:
    - ${TENANT_NAME}.${TENANT_DOMAIN}
    serviceCidr: ${TENANT_SVC_CIDR}
    podCidr: ${TENANT_POD_CIDR}
    dnsServiceIPs:
    - ${TENANT_DNS_SERVICE}
  addons:
    coreDNS: {}
    kubeProxy: {}
    konnectivity:
      server:
        port: ${TENANT_PROXY_PORT}
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits: {}
EOF

kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
|
||||
|
||||
> Check the `networkProfile` fields according to your installation.
>
> To let Kamaji work in KinD, you have to indicate that the service must be of type [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport).
|
||||
After a few seconds, check the created resources in the tenants namespace and when ready it will look similar to the following:

```command
kubectl -n ${TENANT_NAMESPACE} get tcp,deploy,pods,svc

NAME                          VERSION   STATUS   CONTROL-PLANE ENDPOINT   KUBECONFIG                   DATASTORE   AGE
tenantcontrolplane/tenant-00  v1.25.2   Ready    192.168.32.240:6443      tenant-00-admin-kubeconfig   default     2m20s

NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/tenant-00   3/3     3            3           118s

NAME                             READY   STATUS    RESTARTS   AGE
pod/tenant-00-58847c8cdd-7hc4n   4/4     Running   0          82s
pod/tenant-00-58847c8cdd-ft5xt   4/4     Running   0          82s
pod/tenant-00-58847c8cdd-shc7t   4/4     Running   0          82s

NAME                TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                         AGE
service/tenant-00   LoadBalancer   10.32.132.241   192.168.32.240   6443:32152/TCP,8132:32713/TCP   2m20s
```
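
If the resources are not ready yet, you can watch the Tenant Control Plane until its status turns `Ready` (a sketch; `tcp` is the short name registered by the Kamaji CRD):

```bash
# Watch the Tenant Control Plane status
kubectl -n ${TENANT_NAMESPACE} get tcp ${TENANT_NAME} -w
```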
|
||||
|
||||
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler` run unchanged in the `tcp` pods instead of on dedicated machines, and they are exposed through a service on port `6443` of the worker nodes in the Management Cluster.

The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP`, acting as `ControlPlaneEndpoint` for the worker nodes and other clients such as, for example, `kubectl`. Service types `NodePort` and `ClusterIP` are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.

### Working with Tenant Control Plane

Collect the external IP address of the `tcp` service:

```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP")
```
|
||||
|
||||
and check it out:
|
||||
|
||||
```bash
|
||||
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/healthz
|
||||
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/version
|
||||
```
|
||||
|
||||
The `kubeconfig` required to access the Tenant Control Plane is stored in a secret:
|
||||
|
||||
```bash
|
||||
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
|
||||
| jq -r '.data["admin.conf"]' \
|
||||
| base64 --decode \
|
||||
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
|
||||
```
|
||||
|
||||
and let's check it out:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig cluster-info
|
||||
|
||||
Kubernetes control plane is running at https://192.168.32.240:6443
|
||||
CoreDNS is running at https://192.168.32.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
|
||||
```
|
||||
|
||||
Check out how the Tenant Control Plane advertises itself to workloads:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc
|
||||
|
||||
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
default kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 6m
|
||||
```
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep
|
||||
|
||||
NAME ENDPOINTS AGE
|
||||
kubernetes 192.168.32.240:6443 18m
|
||||
```
|
||||
|
||||
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
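
A quick way to compare the two, as a sketch using the same `kubectl` and `jq`-style tooling assumed throughout this guide:

```bash
# Print the advertised endpoint next to the expected one
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep kubernetes \
  -o jsonpath='{.subsets[0].addresses[0].ip}:{.subsets[0].ports[0].port}{"\n"}'
echo "${TENANT_ADDR}:${TENANT_PORT}"
```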
|
||||
|
||||
### Join worker nodes
|
||||
|
||||
The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes, so the next step is to join some worker nodes to the Tenant Control Plane.

Kamaji does not provide any helper for the creation of tenant worker nodes; instead, it leverages the [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Refer to the [Cluster API guide](guides/cluster-api.md) to learn more about supported providers.

An alternative approach for joining nodes is to use the `kubeadm` command on each node. Follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to:

- install `containerd` as container runtime
- install `crictl`, the command line for working with `containerd`
- install `kubectl`, `kubelet`, and `kubeadm` in the desired version

After the installation is complete on all the nodes, open the command line on your Linux workstation and store the IP address of each node in an environment variable:

```bash
WORKER0=<address of first node>
WORKER1=<address of second node>
WORKER2=<address of third node>
```

Store the join command in a variable:

```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
```
|
||||
|
||||
Use a loop to log in to and run the join command on each node:
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t ${JOIN_CMD};
|
||||
done
|
||||
```
|
||||
|
||||
!!! tip "yaki"
|
||||
    This manual process can be further automated to handle the node prerequisites and joining. See the [yaki](https://github.com/clastix/yaki) script, which you can adapt to your preferred operating system and version. The provided script is just a facility: it assumes all worker nodes run `Ubuntu 22.04`, so make sure to adapt it if you're using a different distribution.
|
||||
|
||||
|
||||
Checking the nodes:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
|
||||
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
tenant-00-worker-00 NotReady <none> 25s v1.25.0
|
||||
tenant-00-worker-01 NotReady <none> 17s v1.25.0
|
||||
tenant-00-worker-02 NotReady <none> 9s v1.25.0
|
||||
```
|
||||
|
||||
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste.
|
||||
|
||||
Download the latest stable Calico manifest:
|
||||
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
|
||||
```
|
||||
|
||||
Before applying the Calico manifest, you can customize it as necessary according to your preferences.
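
For example, a common customization is aligning the Calico IP pool with the Tenant pod CIDR. The snippet below is only a sketch: the `192.168.0.0/16` default and the commented `CALICO_IPV4POOL_CIDR` variable depend on the Calico manifest version, so verify them before running it.

```bash
# Hypothetical example: point the default Calico IP pool at the tenant pod CIDR
sed -i "s#192.168.0.0/16#${TENANT_POD_CIDR}#g" calico.yaml
# Depending on the manifest version, you may also need to uncomment CALICO_IPV4POOL_CIDR
```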
|
||||
|
||||
Apply to the Tenant Cluster:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
|
||||
```
|
||||
|
||||
After a while, the nodes will be ready:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
tenant-00-worker-00 Ready <none> 2m48s v1.25.0
|
||||
tenant-00-worker-01 Ready <none> 2m40s v1.25.0
|
||||
tenant-00-worker-02 Ready <none> 2m32s v1.25.0
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
### Delete a Tenant Cluster
|
||||
First, remove the worker nodes joined to the tenant control plane:
|
||||
|
||||
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
```
|
||||
|
||||
For each worker node, log in and clean it up:
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
|
||||
ssh ${USER}@${HOST} -t 'sudo rm -rf /etc/cni/net.d';
|
||||
ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
|
||||
done
|
||||
```
|
||||
|
||||
Delete the tenant control plane from Kamaji
|
||||
|
||||
```bash
|
||||
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
|
||||
```
|
||||
|
||||
### Uninstall Kamaji
|
||||
Uninstall the Kamaji controller by removing the Helm release
|
||||
|
||||
```bash
|
||||
helm uninstall kamaji -n kamaji-system
|
||||
```
|
||||
|
||||
The default datastore installed three `etcd` replicas with persistent volumes, so remove the `PersistentVolumeClaims` resources:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system delete pvc --all
|
||||
```
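
Before removing the custom resource definitions, it is worth checking that no Tenant Control Planes are left anywhere (a quick sketch):

```bash
# List any remaining Tenant Control Planes across all namespaces
kubectl get tcp --all-namespaces
```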
|
||||
|
||||
Also delete the custom resources:
|
||||
|
||||
```bash
|
||||
kubectl delete crd tenantcontrolplanes.kamaji.clastix.io
|
||||
kubectl delete crd datastores.kamaji.clastix.io
|
||||
```
|
||||
|
||||
In case of a broken installation, manually remove the hooks installed by Kamaji:
|
||||
|
||||
```bash
|
||||
kubectl delete ValidatingWebhookConfiguration kamaji-validating-webhook-configuration
|
||||
kubectl delete MutatingWebhookConfiguration kamaji-mutating-webhook-configuration
|
||||
```
|
||||
|
||||
That's all folks!
|
||||
|
||||
63
docs/content/guides/alternative-datastore.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Use Alternative Datastores
|
||||
|
||||
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [PostgreSQL](https://www.postgresql.org/).
|
||||
|
||||
## Install the datastore
|
||||
|
||||
On the Management Cluster, install one of the alternative supported datastores:
|
||||
|
||||
- **MySQL** install it with command:
|
||||
|
||||
`$ make -C deploy/kine/mysql mariadb`
|
||||
|
||||
- **PostgreSQL** install it with command:
|
||||
|
||||
`$ make -C deploy/kine/postgresql postgresql`
|
||||
|
||||
## Install Cert Manager
|
||||
|
||||
As a prerequisite for Kamaji, install Cert Manager:
|
||||
|
||||
```bash
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.11.0 \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
## Install Kamaji
|
||||
|
||||
Use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
|
||||
|
||||
For example, with a PostgreSQL datastore installed:
|
||||
|
||||
```bash
|
||||
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
|
||||
--set etcd.deploy=false \
|
||||
--set datastore.driver=PostgreSQL \
|
||||
--set datastore.endpoints[0]=postgres-default-rw.kamaji-system.svc:5432 \
|
||||
--set datastore.basicAuth.usernameSecret.name=postgres-default-superuser \
|
||||
--set datastore.basicAuth.usernameSecret.namespace=kamaji-system \
|
||||
--set datastore.basicAuth.usernameSecret.keyPath=username \
|
||||
--set datastore.basicAuth.passwordSecret.name=postgres-default-superuser \
|
||||
--set datastore.basicAuth.passwordSecret.namespace=kamaji-system \
|
||||
--set datastore.basicAuth.passwordSecret.keyPath=password \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.name=postgres-default-ca \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.certificateAuthority.certificate.keyPath=ca.crt \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.name=postgres-default-ca \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.certificateAuthority.privateKey.keyPath=ca.key \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.name=postgres-default-root-cert \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.clientCertificate.certificate.keyPath=tls.crt \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.name=postgres-default-root-cert \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.namespace=kamaji-system \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
|
||||
```
|
||||
|
||||
Once installed, you will be able to create Tenant Control Planes using the alternative datastore.
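
As a sketch of what this looks like in practice, the datastore is referenced by name from the Tenant Control Plane spec; the name below is hypothetical and must match a `DataStore` resource existing in your cluster:

```bash
# List the DataStore resources available to Kamaji
kubectl get datastores.kamaji.clastix.io

# A Tenant Control Plane then selects one of them via spec.dataStore, e.g.:
#   spec:
#     dataStore: postgres-default
```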
|
||||
@@ -1,18 +1,18 @@
|
||||
# Backup and Restore

As mentioned in the introduction, Tenant Control Planes are just regular pods scheduled in the Management Cluster. As such, you can take advantage of the same backup and restore methods that you would use to maintain standard workloads.

This guide will assist you in backing up and restoring TCP resources on the Management Cluster using [Velero](https://tanzu.vmware.com/developer/guides/what-is-velero/).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before proceeding with the next steps, we assume that the following prerequisites are met:
|
||||
|
||||
- Working admin cluster
|
||||
- Working Kamaji setup
|
||||
- Working datastore resource
|
||||
- Working TCP resource
|
||||
- Velero binary installed on the operator VM
|
||||
- Velero installed on the Management Cluster
|
||||
- Configured BackupStorageLocation for Velero
|
||||
|
||||
## Backup step
|
||||
|
||||
143
docs/content/guides/certs-lifecycle.md
Normal file
@@ -0,0 +1,143 @@
|
||||
# Certificates Lifecycle
|
||||
|
||||
Kamaji is responsible for creating the required certificates, such as:
|
||||
|
||||
- the Kubernetes API Server certificate
|
||||
- the Kubernetes API Server kubelet client certificate
|
||||
- the Datastore certificate
|
||||
- the front proxy client certificate
|
||||
- the konnectivity certificate (if enabled)
|
||||
|
||||
Also, the following `kubeconfig` resources contain client certificates, which are created by Kamaji, such as:
|
||||
|
||||
- `admin`
|
||||
- `controller-manager`
|
||||
- `konnectivity` (if enabled)
|
||||
- `scheduler`
|
||||
|
||||
All the certificates are created with the `kubeadm` defaults, thus their validity is set to 1 year.
|
||||
|
||||
## How to rotate certificates
|
||||
|
||||
If you need to manually rotate one of these certificates, the required operation is the deletion of the corresponding Secret.
|
||||
|
||||
```
|
||||
$: kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
k8s-126-admin-kubeconfig Opaque 1 12m
|
||||
k8s-126-api-server-certificate Opaque 2 12m
|
||||
k8s-126-api-server-kubelet-client-certificate Opaque 2 3h45m
|
||||
k8s-126-ca Opaque 4 3h45m
|
||||
k8s-126-controller-manager-kubeconfig Opaque 1 3h45m
|
||||
k8s-126-datastore-certificate Opaque 3 3h45m
|
||||
k8s-126-datastore-config Opaque 4 3h45m
|
||||
k8s-126-front-proxy-ca-certificate Opaque 2 3h45m
|
||||
k8s-126-front-proxy-client-certificate Opaque 2 3h45m
|
||||
k8s-126-konnectivity-certificate kubernetes.io/tls 2 3h45m
|
||||
k8s-126-konnectivity-kubeconfig Opaque 1 3h45m
|
||||
k8s-126-sa-certificate Opaque 2 3h45m
|
||||
k8s-126-scheduler-kubeconfig Opaque 1 3h45m
|
||||
```
|
||||
|
||||
Once this operation is performed, Kamaji will be notified of the missing certificate, and it will create it back.
|
||||
|
||||
```
|
||||
$: kubectl delete secret -l kamaji.clastix.io/certificate_lifecycle_controller=x509
|
||||
secret "k8s-126-api-server-certificate" deleted
|
||||
secret "k8s-126-api-server-kubelet-client-certificate" deleted
|
||||
secret "k8s-126-front-proxy-client-certificate" deleted
|
||||
secret "k8s-126-konnectivity-certificate" deleted
|
||||
|
||||
$: kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
k8s-126-admin-kubeconfig Opaque 1 15m
|
||||
k8s-126-api-server-certificate Opaque 2 12s
|
||||
k8s-126-api-server-kubelet-client-certificate Opaque 2 12s
|
||||
k8s-126-ca Opaque 4 3h48m
|
||||
k8s-126-controller-manager-kubeconfig Opaque 1 3h48m
|
||||
k8s-126-datastore-certificate Opaque 3 3h48m
|
||||
k8s-126-datastore-config Opaque 4 3h48m
|
||||
k8s-126-front-proxy-ca-certificate Opaque 2 3h48m
|
||||
k8s-126-front-proxy-client-certificate Opaque 2 12s
|
||||
k8s-126-konnectivity-certificate kubernetes.io/tls 2 11s
|
||||
k8s-126-konnectivity-kubeconfig Opaque 1 3h48m
|
||||
k8s-126-sa-certificate Opaque 2 3h48m
|
||||
k8s-126-scheduler-kubeconfig Opaque 1 3h48m
|
||||
```
|
||||
|
||||
You can notice that the secrets have been automatically recreated, and that a TenantControlPlane rollout occurred with the updated certificates.
|
||||
|
||||
```
|
||||
$: kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
k8s-126-76768bdf89-82w8g 4/4 Running 0 58s
|
||||
k8s-126-76768bdf89-fwltl 4/4 Running 0 58s
|
||||
```
|
||||
|
||||
The same occurs with the `kubeconfig` ones.
|
||||
|
||||
```
|
||||
$: kubectl delete secret -l kamaji.clastix.io/certificate_lifecycle_controller=kubeconfig
|
||||
secret "k8s-126-admin-kubeconfig" deleted
|
||||
secret "k8s-126-controller-manager-kubeconfig" deleted
|
||||
secret "k8s-126-konnectivity-kubeconfig" deleted
|
||||
secret "k8s-126-scheduler-kubeconfig" deleted
|
||||
|
||||
$: kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
k8s-126-576c775b5d-2gr9h 4/4 Running 0 50s
|
||||
k8s-126-576c775b5d-jmvlm 4/4 Running 0 50s
|
||||
```
|
||||
|
||||
## Automatic certificates rotation
|
||||
|
||||
The Kamaji operator runs a controller which processes all the Secrets to determine their expiration, both for the `kubeconfig` resources and for the certificates.

The controller, named `CertificateLifecycle`, extracts the certificates from the _Secret_ objects and notifies the `TenantControlPlaneReconciler` controller, which starts a new certificate rotation.
The rotation occurs the day before their expiration.
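
To see when a given certificate is due for rotation, you can inspect its expiry date yourself. This is only a sketch: it assumes `openssl` and `jq` are available and that the certificate is stored under a `*.crt` key in the Secret, which may vary between Kamaji versions.

```bash
# Print the expiration date of the certificate stored in a Secret
kubectl get secret k8s-126-api-server-certificate -o json \
  | jq -r '.data | to_entries[] | select(.key | endswith(".crt")) | .value' \
  | base64 -d \
  | openssl x509 -noout -enddate
```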
|
||||
|
||||
> Nota Bene:
|
||||
>
|
||||
> Kamaji is responsible for creating the `etcd` client certificate, so a new one will be generated automatically.
> For other Datastore drivers, such as MySQL or PostgreSQL, the referenced Secret will always be deleted by the controller to trigger the rotation:
> since the PKI management is offloaded externally, it must provide the renewed certificates.
|
||||
|
||||
## Certificate Authority rotation
|
||||
|
||||
Kamaji also takes care of your Tenant Cluster's Certificate Authority.
|
||||
|
||||
This can be rotated manually by deleting the following secret.
|
||||
|
||||
```
|
||||
$: kubectl delete secret k8s-126-ca
|
||||
secret "k8s-126-ca" deleted
|
||||
```
|
||||
|
||||
Once this occurs, the TenantControlPlane will enter the `CertificateAuthorityRotating` status.
|
||||
|
||||
```
|
||||
$: kubectl get tcp -w
|
||||
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
```
|
||||
|
||||
This operation is intended to be performed manually, since a new Certificate Authority requires the restart of all the components, as well as of the nodes:
in that case, you will need to distribute the new Certificate Authority and the new node certificates.
|
||||
|
||||
Given the sensitivity of such an operation, the `Secret` controller does not check the _CA_, which has a validity of 10 years according to the `kubeadm` default values.
|
||||
6
docs/content/guides/cluster-api.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Cluster APIs Support
|
||||
|
||||
The [Cluster API](https://github.com/kubernetes-sigs/cluster-api) brings declarative, Kubernetes-style APIs to the creation, configuration, and management of Kubernetes clusters.
|
||||
|
||||
Kamaji offers seamless integration with the most popular Cluster API Infrastructure Providers. Check the currently supported providers and the roadmap on the related [repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
90
docs/content/guides/console.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Kamaji Console
|
||||
This guide will introduce you to the basics of the Kamaji Console, a web UI to help you to view and control your Kamaji setup.
|
||||
|
||||
## Install with Helm
|
||||
The Kamaji Console is a web interface running on the Kamaji Management Cluster that you can install with Helm. Check the Helm Chart [documentation](https://github.com/clastix/kamaji-console) for all the available settings.
|
||||
|
||||
The Kamaji Console requires a Secret in the Kamaji Management Cluster that contains the configuration and credentials to access the console from the browser. You can have the Helm Chart generate it for you, or create it yourself and provide the name of the Secret during installation. Before installing the Kamaji Console, from your workstation replace the placeholders with actual values and execute the following command:
|
||||
|
||||
```bash
|
||||
# The secret is required, otherwise the installation will fail
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
type: Opaque
|
||||
metadata:
|
||||
name: kamaji-console
|
||||
namespace: kamaji-system
|
||||
data:
|
||||
# Credentials to login into console
|
||||
ADMIN_EMAIL: <email>
|
||||
ADMIN_PASSWORD: <password>
|
||||
# Secret used to sign the browser session
|
||||
JWT_SECRET: <jwtSecret>
|
||||
# URL where the console is accessible: https://<hostname>/ui
|
||||
NEXTAUTH_URL: <nextAuthUrl>
|
||||
EOF
|
||||
```
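
Note that the values under `data` must be base64-encoded. An equivalent, and often handier, sketch is to let `kubectl` do the encoding from literal values; the values below are placeholders, and `openssl` is only used to generate a random session-signing secret:

```bash
# Sketch: create the same Secret from literal values
kubectl -n kamaji-system create secret generic kamaji-console \
  --from-literal=ADMIN_EMAIL=admin@example.com \
  --from-literal=ADMIN_PASSWORD='change-me' \
  --from-literal=JWT_SECRET="$(openssl rand -base64 32)" \
  --from-literal=NEXTAUTH_URL=https://kamaji.example.com/ui
```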
|
||||
|
||||
Install the Chart with the release name `console` in the `kamaji-system` namespace:
|
||||
|
||||
```
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm -n kamaji-system install console clastix/kamaji-console
|
||||
```
|
||||
|
||||
Show the status:
|
||||
|
||||
```
|
||||
helm status console -n kamaji-system
|
||||
```
|
||||
|
||||
## Access the Kamaji Console
|
||||
Once installed, forward the console service to the local machine:
|
||||
|
||||
```
|
||||
kubectl -n kamaji-system port-forward service/console-kamaji-console 8080:80
|
||||
Forwarding from 127.0.0.1:8080 -> 3000
|
||||
Forwarding from [::1]:8080 -> 3000
|
||||
```
|
||||
|
||||
and point the browser to `http://127.0.0.1:8080/ui` to access the console. Log in with the credentials you stored in the secret.
|
||||
|
||||
!!! note "Expose with Ingress"
|
||||
    The Kamaji Console can be exposed with an Ingress. Refer to the Helm Chart documentation on how to configure it properly.
|
||||
|
||||
## Explore the Kamaji Console
|
||||
The Kamaji Console provides a high level view of all Tenant Control Planes configured in your Kamaji setup. When you login to the console you are brought to the Tenant Control Planes view, which allows you to quickly understand the state of your Kamaji setup at a glance. It shows summary information about all the Tenant Control Plane objects, including: name, namespace, status, endpoint, version, and datastore.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create a new Tenant Control Plane from a basic placeholder in yaml format:
|
||||
|
||||

|
||||
|
||||
### Working with Tenant Control Plane
|
||||
From the main view, clicking on a Tenant Control Plane row will bring you to the detailed view. This view shows you all the details about the selected Tenant Control Plane, including all child components: pods, deployment, service, config maps, and secrets. From this view, you can also view, copy, and download the `kubeconfig` to access the Tenant Control Plane as tenant admin.
|
||||
|
||||

|
||||
|
||||
### Working with Datastore
|
||||
From the menu bar on the left, clicking on the Datastores item, you can access the list of provisioned Datastores. It shows a summary of the datastores, including the name and the driver used, i.e. etcd, MySQL, or PostgreSQL.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create, delete, edit, and inspect the single datastore.
|
||||
|
||||
### Additional Operations
|
||||
The Kamaji Console offers additional capabilities as part of the commercial edition Clastix Operating Platform:
|
||||
|
||||
- Infrastructure Drivers Management
|
||||
- Applications Delivery via GitOps Operators
|
||||
- Centralized Authentication and Access Control
|
||||
- Auditing and Logging
|
||||
- Monitoring
|
||||
- Backup & Restore
|
||||
|
||||
!!! note "Ready for more?"
|
||||
To purchase entitlement to Clastix Operating Platform please contact hello@clastix.io.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Datastore Migration
|
||||
|
||||
On the Management Cluster, you can deploy one or more multi-tenant datastores such as `etcd`, `PostgreSQL`, and `MySQL` to save the state of the Tenant Clusters. A Tenant Control Plane can be migrated from one datastore to another without service disruption and without complex and error-prone backup & restore procedures.
|
||||
|
||||
This guide will assist you in live-migrating a Tenant's data from one datastore to another one having the same `etcd` driver.
|
||||
|
||||
@@ -169,3 +169,6 @@ admission webhook "catchall.migrate.kamaji.clastix.io" denied the request
|
||||
After a while, depending on the amount of data to migrate, the Tenant Control Plane is put back in full operating mode by the Kamaji controller.
|
||||
|
||||
> Please note that the datastore migration leaves the data on the default datastore, so you have to remove it manually.
|
||||
|
||||
## Post migration
|
||||
After migrating data to the new datastore, complete the migration procedure by restarting the `kubelet.service` on all the tenant worker nodes.
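
A minimal sketch of that last step, reusing the SSH loop style adopted in the other guides (it assumes the worker addresses are stored in the `WORKER*` variables):

```bash
# Restart the kubelet on every tenant worker node after the migration
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for HOST in "${HOSTS[@]}"; do
  ssh ${USER}@${HOST} -t 'sudo systemctl restart kubelet.service'
done
```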
|
||||
@@ -1,31 +1,33 @@
|
||||
# Setup Kamaji on Azure
|
||||
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.
|
||||
|
||||
!!! warning ""
    The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
|
||||
|
||||
The guide requires:
|
||||
|
||||
- a bootstrap machine
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
|
||||
|
||||
## Summary
|
||||
|
||||
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
|
||||
* [Access Management Cluster](#access-management-cluster)
* [Install Cert Manager](#install-cert-manager)
|
||||
* [Install Kamaji controller](#install-kamaji-controller)
|
||||
* [Create Tenant Cluster](#create-tenant-cluster)
|
||||
* [Cleanup](#cleanup)
|
||||
|
||||
## Prepare the bootstrap workspace
|
||||
On the bootstrap machine, clone the repo and prepare the workspace directory:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/clastix/kamaji
|
||||
cd kamaji/deploy
|
||||
```
|
||||
|
||||
We assume you have installed on the bootstrap machine:
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
|
||||
@@ -39,10 +41,10 @@ Make sure you have a valid Azure subscription, and login to Azure:
|
||||
az account set --subscription "MySubscription"
|
||||
az login
|
||||
```
|
||||
> Currently, the Kamaji setup, including Admin and Tenant clusters, needs to be deployed within the same Azure region. Cross-region deployments are not supported.
|
||||
|
||||
## Access Management Cluster

In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant Clusters and implements Monitoring, Logging, and Governance of the whole Kamaji setup, including all Tenant Clusters. For this guide, we're going to use an instance of Azure Kubernetes Service (AKS) as the Management Cluster.
|
||||
|
||||
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own Azure environment:
|
||||
|
||||
@@ -95,11 +97,24 @@ And check you can access:
|
||||
kubectl cluster-info
|
||||
```
|
||||
|
||||
## Install Cert Manager
|
||||
|
||||
Kamaji takes advantage of the [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by a TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be installed:
|
||||
|
||||
```bash
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.11.0 \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
## Install Kamaji Controller
|
||||
|
||||
Installing Kamaji via Helm charts is the preferred way. The Kamaji controller needs to access a Datastore in order to save the data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd` as datastore, out of the box.
|
||||
|
||||
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
|
||||
|
||||
@@ -109,7 +124,8 @@ helm repo update
|
||||
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
```
|
||||
|
||||
!!! note "A managed datastore is highly recommended in production"
    The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to setup a multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, such as a `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
@@ -128,6 +144,8 @@ kind: TenantControlPlane
|
||||
metadata:
|
||||
name: ${TENANT_NAME}
|
||||
namespace: ${TENANT_NAMESPACE}
|
||||
labels:
|
||||
tenant.clastix.io: ${TENANT_NAME}
|
||||
spec:
|
||||
dataStore: default
|
||||
controlPlane:
|
||||
@@ -256,8 +274,13 @@ NAME ENDPOINTS AGE
|
||||
kubernetes 10.240.0.100:6443 57m
|
||||
```
|
||||
|
||||
### Join worker nodes
|
||||
|
||||
The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane.
|
||||
|
||||
Kamaji does not provide any helper for the creation of tenant worker nodes; instead, it leverages the [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for Azure is not yet available: check the roadmap on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
An alternative approach to create and join worker nodes in Azure is to manually create the VMs, turn them into Kubernetes worker nodes and then join through the `kubeadm` command.
|
||||
|
||||
Create an Azure Virtual Machine Scale Set to host the worker nodes:
|
||||
|
||||
@@ -275,7 +298,6 @@ az vmss create \
|
||||
--vnet-name $KAMAJI_VNET_NAME \
|
||||
--subnet $TENANT_SUBNET_NAME \
|
||||
--computer-name-prefix $TENANT_NAME- \
|
||||
--custom-data ./tenant-cloudinit.yaml \
|
||||
--load-balancer "" \
|
||||
--instance-count 0
|
||||
|
||||
@@ -290,15 +312,20 @@ az vmss scale \
|
||||
--new-capacity 3
|
||||
```
|
||||
|
||||
Once all the machines are ready, follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to:
|
||||
|
||||
- install `containerd` as container runtime
|
||||
- install `crictl`, the command line for working with `containerd`
|
||||
- install `kubectl`, `kubelet`, and `kubeadm` in the desired version
|
||||
|
||||
After the installation is complete on all the nodes, store the entire command of joining in a variable:
|
||||
|
||||
```bash
|
||||
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP")
|
||||
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
|
||||
```
|
||||
|
||||
Use a loop to log in to and run the join command on each node:
|
||||
|
||||
```bash
|
||||
VMIDS=($(az vmss list-instances \
|
||||
@@ -343,7 +370,7 @@ As per [documentation](https://projectcalico.docs.tigera.io/reference/public-clo
|
||||
- `CALICO_IPV4POOL_IPIP="Never"`
|
||||
- `CALICO_IPV4POOL_VXLAN="Always"`
|
||||
|
||||
Apply to the Tenant Cluster:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
|
||||
@@ -1,323 +0,0 @@
|
||||
# Setup Kamaji on a generic infrastructure
|
||||
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure, either virtual or bare metal.
|
||||
|
||||
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
|
||||
|
||||
The guide requires:
|
||||
|
||||
- one bootstrap workstation
|
||||
- a Kubernetes cluster to run the Admin and Tenant Control Planes
|
||||
- an arbitrary number of machines to host `Tenant`s' workloads
|
||||
|
||||
## Summary
|
||||
|
||||
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
|
||||
* [Access Admin cluster](#access-admin-cluster)
|
||||
* [Install Kamaji controller](#install-kamaji-controller)
|
||||
* [Create Tenant Cluster](#create-tenant-cluster)
|
||||
* [Cleanup](#cleanup)
|
||||
|
||||
## Prepare the bootstrap workspace
|
||||
This guide is supposed to be run from a remote or local bootstrap machine. First, clone the repo and prepare the workspace directory:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/clastix/kamaji
|
||||
cd kamaji/deploy
|
||||
```
|
||||
|
||||
We assume you have installed on your workstation:
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
|
||||
- [helm](https://helm.sh/docs/intro/install/)
|
||||
- [jq](https://stedolan.github.io/jq/)
|
||||
|
||||
## Access Admin cluster
|
||||
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters.
|
||||
|
||||
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your environment:
|
||||
|
||||
```bash
|
||||
source kamaji.env
|
||||
```
|
||||
|
||||
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
|
||||
|
||||
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
|
||||
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
|
||||
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
|
||||
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).
|
||||
|
||||
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster and check you can access:
|
||||
|
||||
```bash
|
||||
kubectl cluster-info
|
||||
```
|
||||
|
||||
## Install Kamaji Controller
|
||||
|
||||
Kamaji takes advantage of the [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by a TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
|
||||
|
||||
The Kamaji controller needs to access a default datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd`, out of the box.
|
||||
|
||||
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
|
||||
|
||||
```bash
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
```
|
||||
|
||||
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to setup a managed multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a different storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
### Tenant Control Plane
|
||||
|
||||
A tenant control plane of example looks like:
|
||||
|
||||
```yaml
|
||||
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml <<EOF
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: ${TENANT_NAME}
|
||||
namespace: ${TENANT_NAMESPACE}
|
||||
spec:
|
||||
dataStore: default
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 3
|
||||
additionalMetadata:
|
||||
labels:
|
||||
tenant.clastix.io: ${TENANT_NAME}
|
||||
extraArgs:
|
||||
apiServer: []
|
||||
controllerManager: []
|
||||
scheduler: []
|
||||
resources:
|
||||
apiServer:
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 512Mi
|
||||
limits: {}
|
||||
controllerManager:
|
||||
requests:
|
||||
cpu: 125m
|
||||
memory: 256Mi
|
||||
limits: {}
|
||||
scheduler:
|
||||
requests:
|
||||
cpu: 125m
|
||||
memory: 256Mi
|
||||
limits: {}
|
||||
service:
|
||||
additionalMetadata:
|
||||
labels:
|
||||
tenant.clastix.io: ${TENANT_NAME}
|
||||
serviceType: LoadBalancer
|
||||
kubernetes:
|
||||
version: ${TENANT_VERSION}
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
admissionControllers:
|
||||
- ResourceQuota
|
||||
- LimitRanger
|
||||
networkProfile:
|
||||
port: ${TENANT_PORT}
|
||||
certSANs:
|
||||
- ${TENANT_NAME}.${TENANT_DOMAIN}
|
||||
serviceCidr: ${TENANT_SVC_CIDR}
|
||||
podCidr: ${TENANT_POD_CIDR}
|
||||
dnsServiceIPs:
|
||||
- ${TENANT_DNS_SERVICE}
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
konnectivity:
|
||||
server:
|
||||
port: ${TENANT_PROXY_PORT}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
limits: {}
|
||||
EOF
|
||||
|
||||
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
|
||||
```
|
||||
|
||||
After a few seconds, check the created resources in the tenants namespace and when ready it will look similar to the following:
|
||||
|
||||
```command
|
||||
kubectl -n tenants get tcp,deploy,pods,svc
|
||||
|
||||
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
|
||||
tenantcontrolplane/tenant-00 v1.25.2 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig default 2m20s
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/tenant-00 3/3 3 3 118s
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/tenant-00-58847c8cdd-7hc4n 4/4 Running 0 82s
|
||||
pod/tenant-00-58847c8cdd-ft5xt 4/4 Running 0 82s
|
||||
pod/tenant-00-58847c8cdd-shc7t 4/4 Running 0 82s
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/TCP,8132:32713/TCP 2m20s
|
||||
```
|
||||
|
||||
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the admin cluster.
|
||||
|
||||
The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP`, acting as `ControlPlaneEndpoint` for the worker nodes and other clients such as, for example, `kubectl`. Service types `NodePort` and `ClusterIP` are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
|
||||
|
||||
### Working with Tenant Control Plane
|
||||
|
||||
Collect the external IP address of the `tcp` service:
|
||||
|
||||
```bash
|
||||
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP")
|
||||
```
|
||||
|
||||
and check it out:
|
||||
|
||||
```bash
|
||||
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/healthz
|
||||
curl -k https://${TENANT_ADDR}:${TENANT_PORT}/version
|
||||
```
|
||||
|
||||
The `kubeconfig` required to access the Tenant Control Plane is stored in a secret:
|
||||
|
||||
```bash
|
||||
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
|
||||
| jq -r '.data["admin.conf"]' \
|
||||
| base64 --decode \
|
||||
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
|
||||
```
|
||||
|
||||
and let's check it out:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig cluster-info
|
||||
|
||||
Kubernetes control plane is running at https://192.168.32.240:6443
|
||||
CoreDNS is running at https://192.168.32.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
|
||||
```
|
||||
|
||||
Check out how the Tenant Control Plane advertises itself to workloads:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc
|
||||
|
||||
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
default kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 6m
|
||||
```
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep
|
||||
|
||||
NAME ENDPOINTS AGE
|
||||
kubernetes 192.168.32.240:6443 18m
|
||||
```
|
||||
|
||||
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
|
||||
|
||||
### Prepare worker nodes to join
|
||||
|
||||
Currently, Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, such as Terraform.
|
||||
|
||||
You can use the provided helper script `/deploy/nodes-prerequisites.sh`, in order to install the dependencies on all the worker nodes:
|
||||
|
||||
- Install `containerd` as container runtime
|
||||
- Install `crictl`, the command line for working with `containerd`
|
||||
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
|
||||
|
||||
> Warning: the script assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
|
||||
|
||||
Run the script:
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
|
||||
```
|
||||
|
||||
### Join worker nodes
|
||||
The current approach for joining nodes is to use `kubeadm` and therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire command of joining in a variable:
|
||||
|
||||
```bash
|
||||
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
|
||||
```
|
||||
|
||||
A bash loop will be used to join all the available nodes.
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t ${JOIN_CMD};
|
||||
done
|
||||
```
|
||||
|
||||
Checking the nodes:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
|
||||
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
tenant-00-worker-00 NotReady <none> 25s v1.25.0
|
||||
tenant-00-worker-01 NotReady <none> 17s v1.25.0
|
||||
tenant-00-worker-02 NotReady <none> 9s v1.25.0
|
||||
```
|
||||
|
||||
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste.
|
||||
|
||||
Download the latest stable Calico manifest:
|
||||
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
|
||||
```
|
||||
|
||||
Before applying the Calico manifest, you can customize it as necessary according to your preferences.
|
||||
|
||||
Apply to the tenant cluster:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
|
||||
```
|
||||
|
||||
After a while, the nodes will become ready:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME                  STATUS   ROLES    AGE     VERSION
tenant-00-worker-00   Ready    <none>   2m48s   v1.25.0
tenant-00-worker-01   Ready    <none>   2m40s   v1.25.0
tenant-00-worker-02   Ready    <none>   2m32s   v1.25.0
```

## Cleanup

Remove the worker nodes that joined the tenant control plane:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
```

For each worker node, log in and clean it up:

```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
  ssh ${USER}@${HOST} -t 'sudo rm -rf /etc/cni/net.d';
  ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
done
```

Delete the Tenant Control Plane from Kamaji:

```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```

That's all folks!

@@ -1,14 +1,14 @@
# Manage tenant resources GitOps-way from the admin cluster
# Manage Tenant Control Planes with GitOps

This guide describes a declarative way to deploy Kubernetes add-ons across multiple Tenant Clusters, the GitOps way. An admin may need to apply a specific workload into Tenant Clusters and ensure it is constantly reconciled, no matter what the tenants will do in their clusters. Examples include installing monitoring agents, ensuring specific policies, installing infrastructure operators like Cert Manager, and so on.

This way the tenant resources can be ensured from a single pane of glass, from the *admin cluster*.
This way the tenant resources can be ensured from a single pane of glass, from the *Management Cluster*.

## Flux as the GitOps operator

As GitOps ensures a constant reconciliation to a Git-versioned desired state, [Flux](https://fluxcd.io) can satisfy the requirement of those scenarios. In particular, the controllers that reconcile [resources](https://fluxcd.io/flux/concepts/#reconciliation) support communicating to external clusters.

In this scenario the Flux toolkit would run in the *admin cluster*, with reconcile controllers reconciling resources into *tenant clusters*.
In this scenario the Flux toolkit would run in the *Management Cluster*, with reconcile controllers reconciling resources into *Tenant Clusters*.



@@ -29,7 +29,7 @@ tenant1 v1.25.1 Ready 172.18.0.2:31443 tenant1-admin-kubeconfig

> As the *admin* user has the *cluster-admin* `ClusterRole`, it will have the necessary privileges to operate on Custom Resources too.

Given that Flux is installed in the *admin cluster* - guide [here](https://fluxcd.io/flux/installation/) - resources can be ensured for specific tenant clusters, by filling the `spec.kubeConfig` field of the Flux reconciliation resource.
Given that Flux is installed in the *Management Cluster* - guide [here](https://fluxcd.io/flux/installation/) - resources can be ensured for specific Tenant Clusters, by filling the `spec.kubeConfig` field of the Flux reconciliation resource.

For example, it might be needed to ensure [cert-manager](https://cert-manager.io/) is installed into a *tenant1* cluster with Helm. It can be done by declaring a `HelmRelease` as follows:

@@ -69,7 +69,7 @@ spec:
    replicaCount: 2
```

and applying it in the *admin cluster*, alongside the related *jetstack* `HelmRepository`, in the *tenants* `Namespace`.
and applying it in the *Management Cluster*, alongside the related *jetstack* `HelmRepository`, in the *tenants* `Namespace`.

The result would be having Cert Manager installed in the *default* `Namespace` of the tenant *tenant1*'s cluster:

@@ -82,7 +82,7 @@ tenant1-cert-manager-cainjector 1/1 1 1 4m3s
tenant1-cert-manager-webhook 1/1 1 1 4m3s
```

No matter what the tenant users do on the *tenant cluster*, the Flux reconciliation controllers running in the *admin cluster* will ensure that the desired state declared by the reconciliation resources existing in the *admin cluster* is reconciled in the *tenant cluster*.
No matter what the tenant users do on the *Tenant Cluster*, the Flux reconciliation controllers running in the *Management Cluster* will ensure that the desired state declared by the reconciliation resources existing in the *Management Cluster* is reconciled in the *Tenant Cluster*.

Furthermore, this approach requires neither Flux nor the related reconciliation Custom Resources to be installed in each tenant cluster.
Furthermore, this approach requires neither Flux nor the related reconciliation Custom Resources to be installed in each Tenant Cluster.


@@ -1,5 +0,0 @@
# MySQL as Kubernetes Storage

Kamaji offers the possibility of having a different storage system than `ETCD` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [MySQL](https://www.mysql.com/).

> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.
@@ -1,6 +0,0 @@
# PostgreSQL as Kubernetes Storage

Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
One of the implementations is [PostgreSQL](https://www.postgresql.org/).

> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.
@@ -1,5 +1,5 @@
# Tenant Cluster Upgrade
The process of upgrading a _“tenant cluster”_ consists of two steps:
The process of upgrading a _“Tenant Cluster”_ consists of two steps:

1. Upgrade the Tenant Control Plane
2. Upgrade the Tenant Worker Nodes
@@ -14,6 +14,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  labels:
    tenant.clastix.io: tenant-00
spec:
  controlPlane:
    deployment:
@@ -27,6 +29,9 @@ spec:
```

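For reference, the version bump can also be applied by patching the `TenantControlPlane` resource in place. This is only a sketch, where the `tcp` short name and the `spec.kubernetes.version` field path are assumptions based on the Kamaji API:

```bash
# Hypothetical example: move the tenant-00 control plane to a newer Kubernetes
# release by patching the version field; pick a version supported by Kamaji.
kubectl patch tcp tenant-00 --type=merge \
  -p '{"spec":{"kubernetes":{"version":"v1.25.2"}}}'
```
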
## Upgrade of Tenant Worker Nodes
As Kamaji currently does not provide any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`. Refer to the official [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#upgrade-worker-nodes).

> We have the Cluster API support in our roadmap, so that you can upgrade _“tenant clusters”_ in a fully declarative way.
As Kamaji currently does not provide any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`.
Refer to the official [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#upgrade-worker-nodes).

Kamaji offers a [Cluster API Control Plane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji), thus integrating with the declarative management approach to Kubernetes clusters.
You can refer to the official [Cluster API documentation](https://cluster-api.sigs.k8s.io/).

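For orientation, the manual `kubeadm` flow referenced above typically looks like the following per-node sequence. The commands are illustrative only: they reuse the variables from the guide above, and package versions, package manager and node names are placeholders to adapt to your environment:

```bash
# Drain the node from the Tenant Cluster, upgrade kubeadm/kubelet on the host
# (Debian/Ubuntu package commands assumed), then bring the node back.
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig drain tenant-00-worker-00 --ignore-daemonsets
ssh ${USER}@${WORKER0} -t 'sudo apt-get install -y kubeadm=1.25.2-00 && sudo kubeadm upgrade node'
ssh ${USER}@${WORKER0} -t 'sudo apt-get install -y kubelet=1.25.2-00 && sudo systemctl restart kubelet'
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig uncordon tenant-00-worker-00
```
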
BIN
docs/content/images/architecture.png
Normal file
After Width: | Height: | Size: 163 KiB |
BIN
docs/content/images/console-ds-list.png
Normal file
After Width: | Height: | Size: 150 KiB |
BIN
docs/content/images/console-tcp-create.png
Normal file
After Width: | Height: | Size: 207 KiB |
BIN
docs/content/images/console-tcp-list.png
Normal file
After Width: | Height: | Size: 249 KiB |
BIN
docs/content/images/console-tcp-view.png
Normal file
After Width: | Height: | Size: 304 KiB |
BIN
docs/content/images/favicon.png
Normal file
After Width: | Height: | Size: 31 KiB |
Before Width: | Height: | Size: 189 KiB |
Before Width: | Height: | Size: 184 KiB |
BIN
docs/content/images/logo.png
Normal file
After Width: | Height: | Size: 14 KiB |
@@ -1,51 +1,48 @@
# Kamaji
**Kamaji** deploys and operates Kubernetes at scale with a fraction of the operational burden.

**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden.

## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_. Kamaji is special because the Control Plane components are running inside pods instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.



<img src="images/architecture.png" width="600">

All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.

<p align="center" style="padding: 6px 6px">
  <img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="200" />
</p>

## Features

- **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full APIs compliant:** tenant clusters are fully CNCF compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji provisioned cluster and a dedicated cluster.
!!! info "CNCF Compliance"
    All the Tenant Clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.

## Getting started

Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji on [KinD](https://kind.sigs.k8s.io/).
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji.

## Open Source
Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.

## FAQs
Q. What does Kamaji mean?

A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
A. Kamaji is named after the character _Kamajī_ (釜爺, lit. "Boiler Geezer") from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away). Kamajī is an elderly man with six long arms who operates the boiler room of the Bathhouse. He is the silent professional whom no one sees, but who delivers the hot, fragrant water to all the guests, just like our Kamaji provides Kubernetes as a service!

Q. Is Kamaji another Kubernetes distribution?
Q. Is Kamaji yet another Kubernetes distribution?

A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).

Q. How is Kamaji different from typical multi-cluster management solutions?

A. Most of the existing multi-cluster management solutions provision specific infrastructure for the control plane, in most cases dedicated machines. Kamaji is special because the control planes of the downstream clusters are regular pods running in the management cluster. This solution makes running control planes at scale cheaper and easier to deploy and operate.

Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?

A. Yes, the tenant control plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
A. Yes, the tenant control plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used, no forks!

Q. How is Kamaji different from managed Kubernetes services offered by Public Clouds?

A. Kamaji gives you full control over all your Kubernetes infrastructures, offering unparalleled consistency across disparate environments: cloud, data-center, and edge, while simplifying and centralizing operations, maintenance, and management tasks. Unlike other Managed Kubernetes services, Kamaji allows you to connect worker nodes from any infrastructure, providing you greater freedom, flexibility, and consistency than public Managed Kubernetes services.

Q. How does Kamaji differ from Cluster API?

A. Kamaji and Cluster API complement each other. Kamaji's core idea is more efficient control plane management. Cluster API provides a declarative approach to cluster bootstrap and lifecycle management across different environments, cloud providers, and on-premises infrastructures. Combined together, you get the best of both: Kamaji simplifies Control Plane management, while Cluster API abstracts away the infrastructure. See the [CAPI providers](guides/cluster-api.md) supported by Kamaji.

Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?

A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While such a solution is often the right balance between features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can provide cluster admin permissions to the tenant.
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While such a solution is often the right balance between features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can provide full cluster admin permissions to the tenant.

Q. Well, you convinced me, how can I give it a try?

A. It is possible to get started with Kamaji on a laptop with [KinD](getting-started.md) installed.

@@ -5,7 +5,7 @@ Kamaji has been designed to operate a large scale of Kubernetes Tenant Control P
In the Operator jargon, a manager is created to start several controllers, each one with their own responsibility.
When a manager is started, all the underlying controllers are started, along with other "runnable" resources, like the webhook server.

Kamaji operates several reconciliation operations, both in the admin and tenant clusters.
Kamaji operates several reconciliation operations, both in the admin and Tenant Clusters.
With that said, a main manager is responsible for reconciling the admin resources (Deployment, Secret, ConfigMap, etc.); for each Tenant Control Plane, a new manager is spun up as a controller of the main manager.
These Tenant Control Plane managers, named soot managers in the code base, in turn start and run controllers to ensure the desired state of the underlying add-ons and required resources, such as the kubeadm ones.

@@ -25,7 +25,7 @@ Your mileage may vary and just want to share with the community how it has been

## Infrastructure

The benchmark has been issued on a Kubernetes cluster backed by Elastic Kubernetes Service used as an Admin cluster.
The benchmark has been issued on a Kubernetes cluster backed by Elastic Kubernetes Service used as Management Cluster.

Two node pools have been created to avoid the noisy neighbour effect, and to increase performance:

@@ -191,6 +191,8 @@ kind: TenantControlPlane
metadata:
  name: benchmark$I
  namespace: $NS
  labels:
    tenant.clastix.io: benchmark$I
spec:
  dataStore: $DS
  controlPlane:
@@ -236,4 +238,4 @@ If you're encountering different results, please, engage with the community to s

# Running a thousand Tenant Control Planes using multiple DataStores

The next benchmark must address the use case where a Kamaji admin cluster manages up to a thousand Tenant Control Plane instances.
The next benchmark must address the use case where a Kamaji Management Cluster manages up to a thousand Tenant Control Plane instances.

@@ -4,22 +4,24 @@ Currently, **Kamaji** allows customization using CLI flags for the `manager` sub

Available flags are the following:

| Flag | Usage | Default |
| ---- | ------ | --- |
| `--metrics-bind-address` | The address the metric endpoint binds to. | `:8080` |
| `--health-probe-bind-address` | The address the probe endpoint binds to. | `:8081` |
| `--leader-elect` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. | `true` |
| `--tmp-directory` | Directory which will be used to work with temporary files. | `/tmp/kamaji` |
| `--kine-image` | Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies). | `rancher/kine:v0.9.2-amd64` |
| `--datastore` | The default DataStore that should be used by Kamaji to setup the required storage. | `etcd` |
| `--migrate-image` | Specify the container image to launch when a TenantControlPlane is migrated to a new datastore. | `migrate-image` |
| `--max-concurrent-tcp-reconciles` | Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption). | `1` |
| `--pod-namespace` | The Kubernetes Namespace in which the Operator is running, required for the TenantControlPlane migration jobs. | `os.Getenv("POD_NAMESPACE")` |
| `--webhook-service-name` | The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs. | `kamaji-webhook-service` |
| `--serviceaccount-name` | The Kubernetes ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs. | `os.Getenv("SERVICE_ACCOUNT")` |
| `--webhook-ca-path` | Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs. | `/tmp/k8s-webhook-server/serving-certs/ca.crt` |
| `--zap-devel` | Development Mode (encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode (encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error). | `true` |
| `--zap-encoder` | Zap log encoding, one of 'json' or 'console' | `console` |
| `--zap-log-level` | Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity | `info` |
| `--zap-stacktrace-level` | Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). | `info` |
| `--zap-time-encoding` | Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') | `epoch` |
| Flag | Usage | Default |
|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------|
| `--metrics-bind-address` | The address the metric endpoint binds to. | `:8080` |
| `--health-probe-bind-address` | The address the probe endpoint binds to. | `:8081` |
| `--leader-elect` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. | `true` |
| `--tmp-directory` | Directory which will be used to work with temporary files. | `/tmp/kamaji` |
| `--kine-image` | Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies). | `rancher/kine:v0.9.2-amd64` |
| `--datastore` | The default DataStore that should be used by Kamaji to setup the required storage. | `etcd` |
| `--migrate-image` | Specify the container image to launch when a TenantControlPlane is migrated to a new datastore. | `migrate-image` |
| `--max-concurrent-tcp-reconciles` | Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption). | `1` |
| `--pod-namespace` | The Kubernetes Namespace in which the Operator is running, required for the TenantControlPlane migration jobs. | `os.Getenv("POD_NAMESPACE")` |
| `--webhook-service-name` | The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs. | `kamaji-webhook-service` |
| `--serviceaccount-name` | The Kubernetes ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs. | `os.Getenv("SERVICE_ACCOUNT")` |
| `--webhook-ca-path` | Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs. | `/tmp/k8s-webhook-server/serving-certs/ca.crt` |
| `--controller-reconcile-timeout` | The reconciliation request timeout before the controller withdraws the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint. | `30s` |
| `--cache-resync-period` | The controller-runtime.Manager cache resync period. | `10h` |
| `--zap-devel` | Development Mode (encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode (encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error). | `true` |
| `--zap-encoder` | Zap log encoding, one of 'json' or 'console' | `console` |
| `--zap-log-level` | Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity | `info` |
| `--zap-stacktrace-level` | Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). | `info` |
| `--zap-time-encoding` | Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') | `epoch` |

@@ -1,14 +1,17 @@
# Conformance
# CNCF Conformance
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation - CNCF - provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).

The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the
[kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.

All the _“tenant clusters”_ built with Kamaji are CNCF conformant:
All the _“Tenant Clusters”_ built with Kamaji are CNCF conformant:

- [v1.23](https://github.com/cncf/k8s-conformance/pull/2194)
- [v1.24](https://github.com/cncf/k8s-conformance/pull/2193)
- [v1.25](https://github.com/cncf/k8s-conformance/pull/2188)
- [v1.26](https://github.com/cncf/k8s-conformance/pull/2787)
- [v1.27](https://github.com/cncf/k8s-conformance/pull/2786)
- [v1.28](https://github.com/cncf/k8s-conformance/pull/2785)

<p align="left" style="padding: 6px 6px">
  <img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="100" />
@@ -21,13 +24,13 @@ regularly built and kept up to date to execute against all currently supported v

Download a [binary release](https://github.com/vmware-tanzu/sonobuoy/releases) of the CLI.

Make sure to access your tenant cluster:
Make sure to access your Tenant Cluster:

```
export KUBECONFIG=tenant.kubeconfig
```

Deploy a Sonobuoy pod to your tenant cluster with:
Deploy a Sonobuoy pod to your Tenant Cluster with:

```
sonobuoy run --mode=certified-conformance