Compare commits

...

27 Commits

Author  SHA1  Message  Date
Dario Tranchitella  b594b598b1  chore(helm)!: tcp pod advanced scheduling  2022-10-21 14:39:24 +02:00
Dario Tranchitella  c8ce212730  chore(kustomize): tcp pod advanced scheduling  2022-10-21 14:39:24 +02:00
Dario Tranchitella  714b173132  docs: tcp pod advanced scheduling  2022-10-21 14:39:24 +02:00
Dario Tranchitella  0217d579d6  feat: tcp pod advanced scheduling  2022-10-21 14:39:24 +02:00
Dario Tranchitella  c242f4ac58  api!: tcp pod advanced scheduling  2022-10-21 14:39:24 +02:00
Dario Tranchitella  d4d25a8a05  chore(makefile): golint recipe  2022-10-21 14:39:24 +02:00
maxgio92  cff7f7c4e5  Refactor documentation and provide a website (#173)  2022-10-20 09:57:54 +02:00
Dario Tranchitella  6c817fd7ab  fix(helm): kubeversion constraint  2022-10-12 11:27:45 +02:00
Massimiliano Giovagnoli  d31ada4da6  docs: add link to env file for admin cluster setup (Signed-off-by: Massimiliano Giovagnoli <me@maxgio.it>)  2022-10-12 10:20:13 +02:00
Massimiliano Giovagnoli  ee01f721d2  docs: add link script for joining nodes setup (Signed-off-by: Massimiliano Giovagnoli <me@maxgio.it>)  2022-10-11 17:48:06 +02:00
bsctl  912e010363  docs: add cncf conformance logo  2022-10-06 10:18:18 +02:00
bsctl  e2b03ca873  docs: add cncf conformance logo  2022-10-06 10:18:18 +02:00
Adriano Pezzuto  dccf7bd540  chore(helm): update metadata to helm chart  2022-10-05 09:47:37 +02:00
bsctl  25a65a7496  fix(docs): add logo in svg format  2022-09-23 19:32:16 +02:00
Dario Tranchitella  1ff03246c6  chore(helm): bumping to v0.1.0  2022-09-19 11:43:51 +02:00
Dario Tranchitella  8335f645a5  chore(kustomize): bumping to v0.1.0  2022-09-19 11:43:51 +02:00
Dario Tranchitella  70a791be74  chore(makefile): bumping to v0.1.0  2022-09-19 11:43:51 +02:00
bsctl  b0293c23b5  fix(docs): minor improvement  2022-09-16 20:36:49 +02:00
bsctl  50bba9bb2e  fix(docs): deploy tenant nodes on separate subnet  2022-09-16 20:36:49 +02:00
bsctl  f05f7eaf07  fix(docs): remove outdated manifests  2022-09-16 20:36:49 +02:00
bsctl  bfd34ef47e  fix(docs): minor improvements  2022-09-16 20:36:49 +02:00
bsctl  b73c7a20ed  fix(docs): update roadmap in readme  2022-09-16 20:36:49 +02:00
bsctl  004441e77e  fix(docs): use default md style for api reference  2022-09-16 20:36:49 +02:00
bsctl  0f85b6c534  fix(docs): wrong links in readme  2022-09-16 20:36:49 +02:00
bsctl  b674738f0d  fix(docs): pin always the kubeadm versions  2022-09-16 20:36:49 +02:00
bsctl  6dc3cd1876  fix(docs): set requirements on kubeadm version  2022-09-16 20:36:49 +02:00
bsctl  96a57fefa5  refactor(docs): track new features and improvements  2022-09-16 20:36:49 +02:00
55 changed files with 8790 additions and 5580 deletions

View File

@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.0.1
VERSION ?= 0.1.1
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -36,7 +36,7 @@ IMAGE_TAG_BASE ?= clastix.io/operator
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# Image URL to use all building/pushing image targets
IMG ?= clastix/kamaji:latest
IMG ?= clastix/kamaji:v$(VERSION)
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -87,10 +87,18 @@ CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2)
GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call install-kustomize,$(KUSTOMIZE),3.8.7)
APIDOCS_GEN = $(shell pwd)/bin/crdoc
apidocs-gen: ## Download crdoc locally if necessary.
$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@latest)
##@ Development
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
@@ -101,6 +109,9 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
golint: golangci-lint ## Linting the code according to the styling guide.
$(GOLANGCI_LINT) run -c .golangci.yml
test:
go test ./... -coverprofile cover.out
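The `golint` recipe added in this hunk runs `golangci-lint` with `-c .golangci.yml`, so it assumes a config file at the repository root. That file is not part of this compare; a minimal sketch of what such a config might contain (the linter selection here is purely illustrative, not the project's actual setup):

```yaml
# .golangci.yml -- illustrative only; the repository's real config is not shown in this compare
run:
  timeout: 5m          # controller codebases can be slow to lint from a cold cache
linters:
  disable-all: true
  enable:              # a conservative baseline set
    - govet
    - staticcheck
    - gofmt
    - goimports
```

With the recipe in place, `make golint` downloads the pinned golangci-lint v1.49.0 if needed and lints the tree.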
@@ -245,3 +256,8 @@ env:
e2e: env load helm ginkgo ## Create a KinD cluster, install Kamaji on it and run the test suite.
$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
$(GINKGO) -v ./e2e
##@ Document
apidoc: apidocs-gen
$(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl

View File

@@ -20,39 +20,14 @@ Global hyper-scalers are leading the Managed Kubernetes space, while other cloud
**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Core Concepts](./docs/concepts.md) for a deeper understanding of principles behind Kamaji's design.
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate.
<p align="center">
<img src="assets/kamaji-light.png#gh-light-mode-only" />
</p>
<p align="center">
<img src="assets/kamaji-dark.png#gh-dark-mode-only" />
</p>
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
<p align="center">
<img src="assets/screenshot.png" />
</p>
![Architecture](docs/content/images/kamaji-light.png#gh-light-mode-only)
![Architecture](docs/content/images/kamaji-dark.png#gh-dark-mode-only)
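In practice, each tenant control plane is requested declaratively through a `TenantControlPlane` custom resource, which Kamaji reconciles into regular pods in the admin cluster. A minimal sketch, assuming the `kamaji.clastix.io/v1alpha1` API and the `spec.controlPlane.deployment` nesting suggested by the CRD changes later in this compare (required fields beyond those shown are omitted, and the names are hypothetical):

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00        # hypothetical tenant name
  namespace: tenants     # hypothetical namespace in the admin cluster
spec:
  controlPlane:
    deployment:
      replicas: 2        # matches the CRD default shown in this compare
```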
## Getting started
Please refer to the [Getting Started guide](./docs/getting-started-with-kamaji.md) to deploy a minimal setup of Kamaji on KinD.
> This project is still in the early development stage which means it's not ready for production as APIs, commands, flags, etc. are subject to change, but also that your feedback can still help to shape it. Please try it out and let us know what you like, dislike, what works, what doesn't, etc.
## Use cases
The Kamaji project was initially started to solve real and common problems, such as minimizing the Total Cost of Ownership of running Kubernetes at large scale. However, it opens up a wider range of use cases.
Here are a few:
- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize cluster management, monitoring, and observability while leaving developers free to focus on applications, increasing productivity and reducing operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures, with the option of Bring Your Own Device (BYOD).
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespace-based isolation often still need access to cluster-wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** try a new Kubernetes API, an experimental flag, or a new tool without impacting production operations. Kamaji lets you simulate such changes in a safe and controlled environment.
- **Workloads Testing:** easily check the behaviour of your workloads across multiple Kubernetes versions by deploying multiple control planes in a single cluster.
Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-started/) to deploy a minimal setup of Kamaji on KinD.
## Features
@@ -74,37 +49,16 @@ Here are a few:
- [ ] Custom Prometheus metrics for monitoring and alerting
- [x] `kine` integration for MySQL as datastore
- [x] `kine` integration for PostgreSQL as datastore
- [ ] Deeper `kubeadm` integration
- [ ] Pooling of multiple `etcd` datastores
- [x] Pool of multiple datastores
- [ ] Automatic assigning of Tenant Control Plane to a datastore
- [ ] Autoscaling of Tenant Control Plane pods
## Documentation
Please check the project's [documentation](./docs/) for getting started with Kamaji.
Please check the project's [documentation](https://kamaji.clastix.io/) for getting started with Kamaji.
## Contributions
Kamaji is open source, released under the Apache 2.0 license, and any contribution is welcome.
## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS, and we expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters, as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged the same way they run on bare-metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own servers. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While this is often the right balance between features and ease of use, there are cases where a tenant needs access to the control plane itself, for example to manage CRDs on their own. With Kamaji, you can grant cluster-admin permissions to the tenant.
Q. Well, you convinced me, how can I give it a try?
A. It is possible to get started with Kamaji on a laptop with [KinD](./docs/getting-started-with-kamaji.md) installed.
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.

View File

@@ -85,6 +85,16 @@ type ControlPlaneComponentsResources struct {
type DeploymentSpec struct {
// +kubebuilder:default=2
Replicas int32 `json:"replicas,omitempty"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// If specified, the Tenant Control Plane pod's tolerations.
// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// If specified, the Tenant Control Plane pod's scheduling constraints.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used.
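These four additions mirror the standard Kubernetes pod scheduling API, so Tenant Control Plane pods can be pinned, tolerated, and spread like any other workload. A hedged manifest fragment, again assuming the `spec.controlPlane.deployment` nesting (the node labels and taint keys are illustrative, not defaults of Kamaji):

```yaml
spec:
  controlPlane:
    deployment:
      replicas: 3
      nodeSelector:
        node-role.kubernetes.io/infra: ""        # hypothetical dedicated node pool
      tolerations:
        - key: dedicated                          # hypothetical taint key
          operator: Equal
          value: tenant-control-planes
          effect: NoSchedule
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule
          # labelSelector omitted on purpose: per the API comment above, Kamaji
          # substitutes its own selector for this Tenant Control Plane
```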

View File

@@ -521,6 +521,25 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
*out = make([]v1.TopologySpreadConstraint, len(*in))

assets/kamaji-logo.svg Normal file
View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" role="img" viewBox="11.85 8.10 202.80 187.55"><title>Kamaji</title><path d="M32.1 13.7c-2.4.9-6.3 3.5-8.6 5.8-7.7 7.7-7.5 5-7.5 82.5 0 77.4-.2 74.8 7.5 82.5 7.7 7.8 4.2 7.5 90 7.5s82.3.3 90-7.5c7.7-7.7 7.5-5.1 7.5-82.5s.2-74.8-7.5-82.5c-7.8-7.8-4.1-7.5-90.4-7.4-66.7 0-77.2.3-81 1.6zm160.5 9.9c1.9.9 4.4 3.1 5.7 4.8l2.2 3.1v141l-2.2 3.1c-4.8 6.7-1.1 6.4-84.8 6.4s-80 .3-84.8-6.4l-2.2-3.1v-141l2.2-3.1c4.8-6.6.8-6.4 84.6-6.4 68 0 76.3.2 79.3 1.6z"/><path d="M90.1 33.7c-5.1 2.5-7.3 6.7-6.8 13.1.3 4.1 1 5.9 3.3 8.4s2.5 3 .9 2.3c-2-.7-25.1-4.6-29-4.9-1.1 0-2 .5-2 1.4 0 1.1-1.2 1.5-4.9 1.5-6.7 0-6.8 1.9-.4 4 8.2 2.7 9 3.4 3.3 3.5-5.3 0-8.2 1.1-7.1 2.8.7 1.2-2.7 2.2-8.1 2.2-7 0-6.5 2.4 1.1 5.1l3.9 1.4-2.9.5c-4.3.8-3.2 2.3 2.8 4.1l5.3 1.5-5.2 2.7c-8.2 4.2-8.3 5.8-.4 6.1 5.6.2 7.3 1.1 4.2 2.1-2.3.7-2.8 3.1-.9 3.7.7.3-.5 2-2.8 4-5.6 5.3-4 6.4 6.2 4.5 4.4-.8 8.1-1.3 8.3-1.2.2.2-1.3 2.4-3.3 4.8-2 2.4-3.6 4.7-3.6 5.2 0 .4 1.4.5 3 .3 2.9-.4 4 .5 2 1.7-.5.3-1 1.3-1 2.2 0 1.6 2.2 1.5 6.5-.3 1.7-.7 1.6-.2-.9 3-5.4 7.2.7 6.5 13.6-1.4 2.7-1.7 5.1-3 5.4-3 .3 0-.9 2.1-2.7 4.6-4.5 6.6-2.5 7.9 3.7 2.3 4.6-4.3 4.7-4.3 3-1.2-1.9 3.8-2.1 5.6-.4 5.1.6-.2 7.1-7.1 14.3-15.4 7.2-8.2 13.7-14.9 14.5-14.9.8 0 7.3 6.7 14.6 15 7.2 8.2 13.7 15.1 14.3 15.3 1.6.5 1.4-1.4-.5-5-1.6-3.2-1.6-3.2 3.2 1 6 5.1 7.8 4 3.5-2.2-1.8-2.5-3-4.6-2.7-4.6.3 0 2.7 1.3 5.4 3 12.9 7.9 19 8.6 13.6 1.4-2.5-3.2-2.6-3.7-.9-3 5.9 2.5 7.7 1.7 5.6-2.3-.9-1.5-.6-1.7 2-1.3 3.8.6 3.7-.5-.7-5.7-2-2.3-3.5-4.4-3.2-4.6.2-.2 2.1 0 4.3.4 13.9 3 16.4 1.8 9.8-4.3-2.1-1.9-3.2-3.6-2.5-3.6 2 0 1.4-2.8-.9-3.5-3.2-1-1.3-2 4.2-2.1 7.9-.2 7.8-1.9-.4-6.1l-5.2-2.7 5.4-1.6c6.4-1.8 7.9-4 2.9-4.1h-3.3l3.9-1.5c7.3-2.6 8.4-5.4 2.2-5.4-5.1 0-9.6-1.1-9-2.2 1.1-1.7-1.8-2.8-7.1-2.8-5.7-.1-4.9-.8 3.3-3.5 6.4-2.1 6.3-4-.4-4-3.7 0-4.9-.4-4.9-1.5 0-.9-.9-1.4-2-1.4-3.9.3-27 4.2-29 4.9-1.6.7-1.4.2.9-2.3 3.7-4 4.7-11.3 2.2-16.1-4.8-9.2-18.8-9.3-23.8 0-4.4 8.3.2 18.4 9.5 20.5 3 .6 2.8.8-5.5 4l-8.8 3.3-8.7-3.3c-8.1-3.2-8.4-3.4-5.5-4.1 1.7-.3 4.3-1.5 5.7-2.7 13.1-10.3.6-30.4-14.4-23.1zm77.6 98.4c-3.6 2.1-.8 7.7 3.2 6.4 2.1-.6 3.5-3.1 2.5-4.6-1.1-1.8-4-2.7-5.7-1.8zm8.3 3.9c0 1.9.5 2.1 6.3 1.8 4.7-.2 6.2-.7 6.2-1.8s-1.5-1.6-6.2-1.8c-5.8-.3-6.3-.1-6.3 1.8zm-135.6.3c-.2.7-.3 7.4-.2 14.8l.3 13.4 3.3.3c3.1.3 3.2.2 3.2-3.4 0-2.5.7-4.6 2.1-6l2.1-2.3 5 6c3.9 4.7 5.6 5.9 7.8 5.9 1.6 0 3.1-.3 3.3-.8.3-.4-2.1-4-5.4-8.1-3.2-4-5.9-7.6-5.9-8 0-.4 2.5-3.1 5.5-6.1 3-3 5.5-5.8 5.5-6.2 0-.4-1.5-.8-3.3-.8-2.8 0-4.4 1-9.6 6.5-3.5 3.6-6.5 6.5-6.7 6.5-.2 0-.4-2.9-.4-6.5V135h-3c-1.7 0-3.3.6-3.6 1.3zm31.2 7c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 3.3 2.2zm13.4-4l.3 11.3h6l.5-7.8c.5-7.6 1.5-9.6 4.7-9.7 3 0 4.3 3.2 4.3 10.6v7.4h3c3 0 3 0 3-5.9 0-7.3 1.2-10.7 4.1-11.6 3.8-1.3 5.9 2.5 5.9 10.6v6.9h6v-9c0-8.3-.2-9.3-2.5-11.5-2.9-3-9.8-3.5-12.7-.8-1.7 1.5-1.9 1.5-3.6 0-2.2-2-9.2-2.3-11.1-.5-1.1 1-1.4 1-1.8 0-.3-.6-1.8-1.2-3.4-1.2h-3l.3 11.2zm45.4-9.9c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 
3.3 2.2zm13-2.5c-.3 12.8-.3 12.8-2.7 12.8-1.5 0-2.7.8-3.1 2-2 5.4 9.4 4.3 11.9-1.2.6-1.3 1.1-7.7 1.1-14.3v-12h-6.9l-.3 12.7zm13.4-1.5l.3 11.3h6v-22l-3.3-.3-3.3-.3.3 11.3z"/></svg>


View File

@@ -1,37 +1,26 @@
apiVersion: v2
name: kamaji
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.1.0
appVersion: v0.1.1
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
home: https://github.com/clastix/kamaji
sources: ["https://github.com/clastix/kamaji"]
kubeVersion: ">=1.18"
icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
kubeVersion: 1.21 - 1.25
maintainers:
- email: iam@mendrugory.com
name: Gonzalo Gabriel Jiménez Fuentes
- email: dario@tranchitella.eu
name: Dario Tranchitella
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
- email: iam@mendrugory.com
name: Gonzalo Gabriel Jiménez Fuentes
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.10.0
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service

View File

@@ -1,6 +1,6 @@
# kamaji
![Version: 0.9.1](https://img.shields.io/badge/Version-0.9.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square)
![Version: 0.10.0](https://img.shields.io/badge/Version-0.10.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.1.1](https://img.shields.io/badge/AppVersion-v0.1.1-informational?style=flat-square)
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
@@ -8,10 +8,10 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
| Name | Email | Url |
| ---- | ------ | --- |
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
## Source Code
@@ -19,7 +19,7 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
## Requirements
Kubernetes: `>=1.18`
Kubernetes: `1.21 - 1.25`
[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
Starting from v0.1.1, this Helm chart installs an internal `etcd` in order to streamline local testing. If you'd like to use an externally managed etcd instance, specify the overrides and set the value `etcd.deploy=false`.
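For instance, a hedged values override for the external-datastore case (only `etcd.deploy` appears in this compare; any other `etcd.*` connection keys would be assumptions):

```yaml
# my-values.yaml -- illustrative override for an externally managed etcd
etcd:
  deploy: false   # skip the chart-managed internal etcd
```

applied with `helm upgrade --install kamaji clastix/kamaji --namespace kamaji-system --create-namespace -f my-values.yaml`.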
@@ -110,7 +110,7 @@ Here the values you can override:
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
| image.pullPolicy | string | `"Always"` | |
| image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
| image.tag | string | `"latest"` | |
| image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | |
| livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"healthcheck"},"initialDelaySeconds":15,"periodSeconds":20}` | The livenessProbe for the controller container |
| loggingDevel.enable | bool | `false` | (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |

View File

@@ -0,0 +1,30 @@
# Kamaji - Managed Kubernetes Service
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
Useful links:
- [Kamaji Github repository](https://github.com/clastix/kamaji)
- [Kamaji Documentation](https://kamaji.clastix.io/)
## Requirements
* Kubernetes v1.22+
* Helm v3
## Installation
To install the Chart with the release name `kamaji`:
helm upgrade --install kamaji clastix/kamaji --namespace kamaji-system --create-namespace
Show the status:
helm status kamaji -n kamaji-system
Upgrade the Chart:
helm upgrade kamaji -n kamaji-system clastix/kamaji
Uninstall the Chart:
helm uninstall kamaji -n kamaji-system

View File

@@ -171,6 +171,909 @@ spec:
type: string
type: object
type: object
affinity:
description: 'If specified, the Tenant Control Plane pod''s
scheduling constraints. More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/'
properties:
nodeAffinity:
description: Describes node affinity scheduling rules
for the pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements
of this field and adding "weight" to the sum if
the node matches the corresponding matchExpressions;
the node(s) with the highest sum are the most preferred.
items:
description: An empty preferred scheduling term
matches all objects with implicit weight 0 (i.e.
it's a no-op). A null preferred scheduling term
matches no objects (i.e. is also a no-op).
properties:
preference:
description: A node selector term, associated
with the corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching
the corresponding nodeSelectorTerm, in the
range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease
to be met at some point during pod execution (e.g.
due to an update), the system may or may not try
to eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector
terms. The terms are ORed.
items:
description: A null or empty node selector term
matches no objects. The requirements of them
are ANDed. The TopologySelectorTerm type implements
a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g.
co-locate this pod in the same node, zone, etc. as some
other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements
of this field and adding "weight" to the sum if
the node has pods which matches the corresponding
podAffinityTerm; the node(s) with the highest sum
are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred
node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term,
associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of
resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set
of namespaces that the term applies to.
The term is applied to the union of the
namespaces selected by this field and
the ones listed in the namespaces field.
null selector and null or empty namespaces
list means "this pod's namespace". An
empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static
list of namespace names that the term
applies to. The term is applied to the
union of the namespaces listed in this
field and the ones selected by namespaceSelector.
null or empty namespaces list and null
namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located
(affinity) or not co-located (anti-affinity)
with the pods matching the labelSelector
in the specified namespaces, where co-located
is defined as running on a node whose
value of the label with key topologyKey
matches that of any node on which any
of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching
the corresponding podAffinityTerm, in the
range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease
to be met at some point during pod execution (e.g.
due to a pod label update), the system may or may
not try to eventually evict the pod from its node.
When there are multiple elements, the lists of nodes
corresponding to each podAffinityTerm are intersected,
i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those
matching the labelSelector relative to the given
namespace(s)) that this pod should be co-located
(affinity) or not co-located (anti-affinity) with,
where co-located is defined as running on a node
whose value of the label with key <topologyKey>
matches that of any node on which a pod of the
set of pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by
this field and the ones listed in the namespaces
field. null selector and null or empty namespaces
list means "this pod's namespace". An empty
selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to.
The term is applied to the union of the namespaces
listed in this field and the ones selected
by namespaceSelector. null or empty namespaces
list and null namespaceSelector means "this
pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the
pods matching the labelSelector in the specified
namespaces, where co-located is defined as
running on a node whose value of the label
with key topologyKey matches that of any node
on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules
(e.g. avoid putting this pod in the same node, zone,
etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the anti-affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity
expressions, etc.), compute a sum by iterating through
the elements of this field and adding "weight" to
the sum if the node has pods which matches the corresponding
podAffinityTerm; the node(s) with the highest sum
are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred
node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term,
associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of
resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set
of namespaces that the term applies to.
The term is applied to the union of the
namespaces selected by this field and
the ones listed in the namespaces field.
null selector and null or empty namespaces
list means "this pod's namespace". An
empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static
list of namespace names that the term
applies to. The term is applied to the
union of the namespaces listed in this
field and the ones selected by namespaceSelector.
null or empty namespaces list and null
namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located
(affinity) or not co-located (anti-affinity)
with the pods matching the labelSelector
in the specified namespaces, where co-located
is defined as running on a node whose
value of the label with key topologyKey
matches that of any node on which any
of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching
the corresponding podAffinityTerm, in the
range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
anti-affinity requirements specified by this field
cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may
or may not try to eventually evict the pod from
its node. When there are multiple elements, the
lists of nodes corresponding to each podAffinityTerm
are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those
matching the labelSelector relative to the given
namespace(s)) that this pod should be co-located
(affinity) or not co-located (anti-affinity) with,
where co-located is defined as running on a node
whose value of the label with key <topologyKey>
matches that of any node on which a pod of the
set of pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by
this field and the ones listed in the namespaces
field. null selector and null or empty namespaces
list means "this pod's namespace". An empty
selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to.
The term is applied to the union of the namespaces
listed in this field and the ones selected
by namespaceSelector. null or empty namespaces
list and null namespaceSelector means "this
pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the
pods matching the labelSelector in the specified
namespaces, where co-located is defined as
running on a node whose value of the label
with key topologyKey matches that of any node
on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
extraArgs:
description: ExtraArgs allows adding additional arguments
to the Control Plane components, such as kube-apiserver,
@@ -195,6 +1098,14 @@ spec:
type: string
type: array
type: object
nodeSelector:
additionalProperties:
type: string
description: 'NodeSelector is a selector which must be true
for the pod to fit on a node. Selector which must match
a node''s labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
type: object
replicas:
default: 2
format: int32
@@ -289,6 +1200,49 @@ spec:
type: object
type: object
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s
tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
topologySpreadConstraints:
description: TopologySpreadConstraints describes how the Tenant
Control Plane pods ought to spread across topology domains.

View File

@@ -9,8 +9,8 @@ image:
# -- The container image of the Kamaji controller.
repository: clastix/kamaji
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: latest
# -- Overrides the image tag whose default is the chart appVersion.
tag:
# -- A list of extra arguments to add to the kamaji controller default ones
extraArgs: []
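This is the chart-side half of the `image.tag` change documented in the chart README table above: with `tag` left empty, the deployment template is expected to fall back to the chart's `appVersion` (v0.1.1). A hedged override for pinning the controller image explicitly:

```yaml
image:
  repository: clastix/kamaji
  pullPolicy: Always
  tag: v0.1.1   # pin explicitly; leave empty to track the chart appVersion
```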

View File

@@ -171,6 +171,909 @@ spec:
type: string
type: object
type: object
affinity:
description: 'If specified, the Tenant Control Plane pod''s
scheduling constraints. More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/'
properties:
nodeAffinity:
description: Describes node affinity scheduling rules
for the pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements
of this field and adding "weight" to the sum if
the node matches the corresponding matchExpressions;
the node(s) with the highest sum are the most preferred.
items:
description: An empty preferred scheduling term
matches all objects with implicit weight 0 (i.e.
it's a no-op). A null preferred scheduling term
matches no objects (i.e. is also a no-op).
properties:
preference:
description: A node selector term, associated
with the corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching
the corresponding nodeSelectorTerm, in the
range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease
to be met at some point during pod execution (e.g.
due to an update), the system may or may not try
to eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector
terms. The terms are ORed.
items:
description: A null or empty node selector term
matches no objects. The requirements of them
are ANDed. The TopologySelectorTerm type implements
a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist.
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators
are In, NotIn, Exists, DoesNotExist,
Gt, and Lt.
type: string
values:
description: An array of string values.
If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty.
If the operator is Gt or Lt, the
values array must have a single
element, which will be interpreted
as an integer. This array is replaced
during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g.
co-locate this pod in the same node, zone, etc. as some
other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements
of this field and adding "weight" to the sum if
the node has pods which match the corresponding
podAffinityTerm; the node(s) with the highest sum
are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred
node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term,
associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of
resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set
of namespaces that the term applies to.
The term is applied to the union of the
namespaces selected by this field and
the ones listed in the namespaces field.
null selector and null or empty namespaces
list means "this pod's namespace". An
empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static
list of namespace names that the term
applies to. The term is applied to the
union of the namespaces listed in this
field and the ones selected by namespaceSelector.
null or empty namespaces list and null
namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located
(affinity) or not co-located (anti-affinity)
with the pods matching the labelSelector
in the specified namespaces, where co-located
is defined as running on a node whose
value of the label with key topologyKey
matches that of any node on which any
of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching
the corresponding podAffinityTerm, in the
range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease
to be met at some point during pod execution (e.g.
due to a pod label update), the system may or may
not try to eventually evict the pod from its node.
When there are multiple elements, the lists of nodes
corresponding to each podAffinityTerm are intersected,
i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those
matching the labelSelector relative to the given
namespace(s)) that this pod should be co-located
(affinity) or not co-located (anti-affinity) with,
where co-located is defined as running on a node
whose value of the label with key <topologyKey>
matches that of any node on which a pod of the
set of pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by
this field and the ones listed in the namespaces
field. null selector and null or empty namespaces
list means "this pod's namespace". An empty
selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to.
The term is applied to the union of the namespaces
listed in this field and the ones selected
by namespaceSelector. null or empty namespaces
list and null namespaceSelector means "this
pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the
pods matching the labelSelector in the specified
namespaces, where co-located is defined as
running on a node whose value of the label
with key topologyKey matches that of any node
on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules
(e.g. avoid putting this pod in the same node, zone,
etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule
pods to nodes that satisfy the anti-affinity expressions
specified by this field, but it may choose a node
that violates one or more of the expressions. The
node that is most preferred is the one with the
greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity
expressions, etc.), compute a sum by iterating through
the elements of this field and adding "weight" to
the sum if the node has pods which match the corresponding
podAffinityTerm; the node(s) with the highest sum
are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred
node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term,
associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of
resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set
of namespaces that the term applies to.
The term is applied to the union of the
namespaces selected by this field and
the ones listed in the namespaces field.
null selector and null or empty namespaces
list means "this pod's namespace". An
empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The
requirements are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label
key that the selector applies
to.
type: string
operator:
description: operator represents
a key's relationship to a set
of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array
of string values. If the operator
is In or NotIn, the values array
must be non-empty. If the operator
is Exists or DoesNotExist, the
values array must be empty.
This array is replaced during
a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of
{key,value} pairs. A single {key,value}
in the matchLabels map is equivalent
to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are
ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static
list of namespace names that the term
applies to. The term is applied to the
union of the namespaces listed in this
field and the ones selected by namespaceSelector.
null or empty namespaces list and null
namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located
(affinity) or not co-located (anti-affinity)
with the pods matching the labelSelector
in the specified namespaces, where co-located
is defined as running on a node whose
value of the label with key topologyKey
matches that of any node on which any
of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching
the corresponding podAffinityTerm, in the
range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified
by this field are not met at scheduling time, the
pod will not be scheduled onto the node. If the
anti-affinity requirements specified by this field
cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may
or may not try to eventually evict the pod from
its node. When there are multiple elements, the
lists of nodes corresponding to each podAffinityTerm
are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those
matching the labelSelector relative to the given
namespace(s)) that this pod should be co-located
(affinity) or not co-located (anti-affinity) with,
where co-located is defined as running on a node
whose value of the label with key <topologyKey>
matches that of any node on which a pod of the
set of pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by
this field and the ones listed in the namespaces
field. null selector and null or empty namespaces
list means "this pod's namespace". An empty
selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list
of label selector requirements. The requirements
are ANDed.
items:
description: A label selector requirement
is a selector that contains values,
a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key
that the selector applies to.
type: string
operator:
description: operator represents a
key's relationship to a set of values.
Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of
string values. If the operator is
In or NotIn, the values array must
be non-empty. If the operator is
Exists or DoesNotExist, the values
array must be empty. This array
is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator
is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to.
The term is applied to the union of the namespaces
listed in this field and the ones selected
by namespaceSelector. null or empty namespaces
list and null namespaceSelector means "this
pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the
pods matching the labelSelector in the specified
namespaces, where co-located is defined as
running on a node whose value of the label
with key topologyKey matches that of any node
on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
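As an aside for readers of this diff: the `affinity` schema above is the standard `corev1.Affinity` type surfaced on the Tenant Control Plane pods. A minimal sketch of how it might be used follows; the parent path (`spec.controlPlane.deployment`) is an assumption, since the enclosing fields are not visible in this hunk:
```yaml
# Hedged sketch: placement under spec.controlPlane.deployment is assumed,
# not confirmed by the truncated diff above.
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
spec:
  controlPlane:
    deployment:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              # Pin the tenant control plane to nodes carrying an
              # illustrative infra label.
              - matchExpressions:
                  - key: node-role.kubernetes.io/infra
                    operator: Exists
```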
extraArgs:
description: ExtraArgs allows adding additional arguments
to the Control Plane components, such as kube-apiserver,
@@ -195,6 +1098,14 @@ spec:
type: string
type: array
type: object
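Per the `extraArgs` description above (shown in full in the second copy of this CRD further down), the field appends flags to kube-apiserver, controller-manager, and scheduler. A hedged sketch; the component key names are assumptions inferred from that description:
```yaml
# Assumed component keys (apiServer, controllerManager, scheduler);
# the flag values are purely illustrative.
extraArgs:
  apiServer:
    - --audit-log-maxage=30
  controllerManager:
    - --node-monitor-grace-period=40s
  scheduler:
    - --v=2
```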
nodeSelector:
additionalProperties:
type: string
description: 'NodeSelector is a selector which must be true
for the pod to fit on a node. Selector which must match
a node''s labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
type: object
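`nodeSelector` is the simplest of the new scheduling knobs: a flat map of labels a node must carry before the Tenant Control Plane pod can land on it. For example (label values illustrative):
```yaml
nodeSelector:
  kubernetes.io/os: linux
  topology.kubernetes.io/zone: eu-west-1a
```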
replicas:
default: 2
format: int32
@@ -289,6 +1200,49 @@ spec:
type: object
type: object
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s
tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified,
allowed values are NoSchedule, PreferNoSchedule and
NoExecute.
type: string
key:
description: Key is the taint key that the toleration
applies to. Empty means match all taint keys. If the
key is empty, operator must be Exists; this combination
means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship
to the value. Valid operators are Exists and Equal.
Defaults to Equal. Exists is equivalent to wildcard
for value, so that a pod can tolerate all taints of
a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period
of time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the
taint forever (do not evict). Zero and negative values
will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration
matches to. If the operator is Exists, the value should
be empty, otherwise just a regular string.
type: string
type: object
type: array
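Paired with a matching taint on dedicated nodes, a toleration lets the Tenant Control Plane pods schedule where ordinary workloads cannot. A sketch with illustrative key and value:
```yaml
tolerations:
  - key: dedicated        # illustrative taint key
    operator: Equal
    value: tenant-control-plane
    effect: NoSchedule
```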
topologySpreadConstraints:
description: TopologySpreadConstraints describes how the Tenant
Control Plane pods ought to spread across topology domains.
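The file view is truncated at this point; the full field description (visible in the second copy of this CRD below) adds that a nil `labelSelector` falls back to the selector Kamaji derives for the given Tenant Control Plane. A sketch spreading replicas across zones:
```yaml
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: DoNotSchedule
    # labelSelector intentionally omitted: per the field description,
    # Kamaji substitutes its own selector for this Tenant Control Plane.
```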
View File
@@ -374,6 +374,478 @@ spec:
type: string
type: object
type: object
affinity:
description: 'If specified, the Tenant Control Plane pod''s scheduling constraints. More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/'
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
properties:
preference:
description: A node selector term, associated with the corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements by node's labels.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements by node's fields.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms. The terms are ORed.
items:
description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements by node's labels.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements by node's fields.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
extraArgs:
description: ExtraArgs allows adding additional arguments to the Control Plane components, such as kube-apiserver, controller-manager, and scheduler.
properties:
@@ -395,6 +867,11 @@ spec:
type: string
type: array
type: object
nodeSelector:
additionalProperties:
type: string
description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
type: object
replicas:
default: 2
format: int32
@@ -469,6 +946,29 @@ spec:
type: object
type: object
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
topologySpreadConstraints:
description: TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used. All topologySpreadConstraints are ANDed.
items:
@@ -1706,7 +2206,7 @@ spec:
- --datastore=kamaji-etcd
command:
- /manager
image: clastix/kamaji:latest
image: clastix/kamaji:v0.1.1
imagePullPolicy: Always
livenessProbe:
httpGet:
View File
@@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: clastix/kamaji
newTag: latest
newTag: v0.1.1
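The same pin can be reproduced, or bumped for a later release, with `kustomize edit set image controller=clastix/kamaji:v0.1.1`, which rewrites the `images:` transformer shown above. A minimal sketch of the resulting file; the `resources` entry is an assumption added for self-containment:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../default   # assumed base, not shown in this diff
images:
  - name: controller
    newName: clastix/kamaji
    newTag: v0.1.1
```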
View File
@@ -1,16 +0,0 @@
include etcd/Makefile
deploy_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
.DEFAULT_GOAL := kamaji
.PHONY: etcd-cluster
reqs: etcd-cluster
.PHONY: kamaji
kamaji: reqs
@kubectl apply -f $(deploy_path)/../../config/install.yaml
.PHONY: destroy
destroy: etcd-certificates/cleanup
@kubectl delete -f $(deploy_path)/../../config/install.yaml
View File
@@ -1,26 +0,0 @@
# Deploy Kamaji
## Quickstart with KinD
```sh
make -C kind
```
## Multi-tenant etcd cluster
> This assumes you already have a running Kubernetes cluster and kubeconfig.
```sh
make -C etcd
```
## Multi-tenant cluster using Kine
`kine` is an `etcd` shim that allows using a different datastore.
Kamaji currently supports the following backends:
- [MySQL](kine/mysql/README.md)
- [PostgreSQL](kine/postgresql/README.md)
> This assumes you already have a running Kubernetes cluster and kubeconfig.
View File
@@ -1,680 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "vxlan"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-node
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- ipamconfigs
verbs:
- get
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
verbs:
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
verbs:
- list
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- clusterinformations
verbs:
- get
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
verbs:
- get
- create
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.20.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.20.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "10.36.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
#- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
#- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.20.0
resources:
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers

File diff suppressed because it is too large

View File

@@ -1,687 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-node
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- ipamconfigs
verbs:
- get
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
verbs:
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
verbs:
- list
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- clusterinformations
verbs:
- get
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
verbs:
- get
- create
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.20.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.20.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "can-reach=192.168.32.0"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.20.0
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
resources:
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---

View File

@@ -1,18 +1,21 @@
# azure parameters
export KAMAJI_REGION=westeurope
export KAMAJI_RG=Kamaji
# https://docs.microsoft.com/en-us/azure/aks/faq#why-are-two-resource-groups-created-with-aks
export KAMAJI_CLUSTER=kamaji
export KAMAJI_NODE_RG=MC_${KAMAJI_RG}_${KAMAJI_CLUSTER}_${KAMAJI_REGION}
export KAMAJI_CLUSTER=kamaji
export KAMAJI_VNET_NAME=kamaji-net
export KAMAJI_VNET_ADDRESS=10.224.0.0/12
export KAMAJI_SUBNET_NAME=kamaji-subnet
export KAMAJI_SUBNET_ADDRESS=10.224.0.0/16
# kamaji parameters
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
export TENANT_VERSION=v1.23.5
export TENANT_VERSION=v1.25.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16
@@ -21,10 +24,8 @@ export TENANT_DNS_SERVICE=10.96.0.10
export TENANT_VM_SIZE=Standard_D2ds_v4
export TENANT_VM_IMAGE=UbuntuLTS
export TENANT_RG=$TENANT_NAME
export TENANT_NSG=$TENANT_NAME-nsg
export TENANT_VNET_NAME=$TENANT_NAME
export TENANT_VNET_ADDRESS=172.12.0.0/16
export TENANT_SUBNET_NAME=$TENANT_NAME-subnet
export TENANT_SUBNET_ADDRESS=172.12.10.0/24
export TENANT_VMSS=$TENANT_NAME-vmss
export TENANT_SUBNET_ADDRESS=10.225.0.0/16
export TENANT_VMSS=$TENANT_NAME-vmss
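These variables drive every `az` and `kubectl` command in the Azure guide. As a minimal sketch (the file name below is an assumption; use the actual path of this env file in the repository), you would load them into your shell before starting:
```bash
# Load the Azure deployment parameters into the current shell.
# NOTE: "kamaji-azure.env" is a hypothetical file name.
source kamaji-azure.env
# Sanity check: should print "kamaji in westeurope".
echo "${KAMAJI_CLUSTER} in ${KAMAJI_REGION}"
```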

View File

@@ -2,10 +2,10 @@
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=clastix.labs
export TENANT_VERSION=v1.23.5
export TENANT_VERSION=v1.25.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16

View File

@@ -1,26 +0,0 @@
# Kine integration
[kine](https://github.com/k3s-io/kine) is an `etcd` shim that allows you to use a different datastore for your Kubernetes cluster.
Kamaji allows running a shared datastore, using a different MySQL or PostgreSQL schema per Tenant.
This can help in overcoming the `etcd` limitations regarding scalability and cluster size, as well as with HA and replication.
## Kamaji additional CLI flags
Kamaji reads the datastore configuration from a cluster-scoped resource named `DataStore`, containing all the required details to secure a connection using a specific driver.
- [Example of a `etcd` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_etcd.yaml)
- [Example of a `MySQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_mysql.yaml)
- [Example of a `PostgreSQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_postgresql.yaml)
Once the datastore is running, and the `DataStore` has been created with the required details, we need to provide information about it to Kamaji by using the following flag and pointing to the resource name:
```
--datastore={.metadata.name}
```
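For instance, assuming you created a `DataStore` resource named `default` (the name here is only illustrative), the controller flag would read:
```
--datastore=default
```
The flag value is simply the `metadata.name` of the `DataStore` resource; no namespace is needed, since the resource is cluster-scoped.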
## Drivers
Further details on the setup for each driver are available here:
- [MySQL/MariaDB](../deploy/kine/mysql/README.md)
- [PostgreSQL](../deploy/kine/postgresql/README.md)

View File

@@ -15,6 +15,7 @@ mariadb-certificates:
chmod 644 $(ROOT_DIR)/certs/*
mariadb-secret:
@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
@kubectl -n kamaji-system create secret generic mysql-config \
--from-file=$(ROOT_DIR)/certs/ca.crt --from-file=$(ROOT_DIR)/certs/ca.key \
--from-file=$(ROOT_DIR)/certs/server.key --from-file=$(ROOT_DIR)/certs/server.crt \

View File

@@ -1,74 +0,0 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `ETCD` thanks to [kine](https://github.com/k3s-io/kine). One of the implementations is [MySQL](https://www.mysql.com/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, MySQL (or [MariaDB](https://mariadb.org/) in this case) will be deployed into the local Kubernetes cluster in order to be used as storage for the tenants.
There is a Makefile to help with the process:
# Setup
Setup of the MySQL/MariaDB backend can be easily issued with a single command.
```bash
$ make mariadb
```
This action will perform all the necessary steps to have MariaDB as the Kubernetes storage backend using kine.
```shell
rm -rf /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs && mkdir /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs
cfssl gencert -initca /home/prometherion/Documents/clastix/kamaji/deploy/mysql/ca-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca
2022/08/18 23:52:56 [INFO] generating a new CA key and certificate from CSR
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 310428005543054656774215122317606431230766314770
cfssl gencert -ca=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.crt -ca-key=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.key \
-config=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/config.json -profile=server \
/home/prometherion/Documents/clastix/kamaji/deploy/mysql/server-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/server
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 582698914718104852311252458344736030793138969927
chmod 644 /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/*
secret/mysql-config created
secret/kine-secret created
serviceaccount/mariadb created
service/mariadb created
deployment.apps/mariadb created
persistentvolumeclaim/pvc-mariadb created
```
## Certificate creation
```bash
$ make mariadb-certificates
```
Communication between kine and the backend is encrypted; therefore, a CA and a certificate signed by it must be created.
## Secret Deployment
```bash
$ make mariadb-secrets
```
The previously generated certificates and the MySQL configuration have to be available in order to be used.
They will be under the secret `kamaji-system:mysql-config`, used by the MySQL/MariaDB instance.
## Deployment
```bash
$ make mariadb-deployment
```
Finally, this starts the MySQL/MariaDB installation with all the required settings and configuration, such as the SSL connection.
# Cleanup
```bash
$ make mariadb-destroy
```

View File

@@ -7,6 +7,7 @@ cnpg-setup:
cnpg-deploy:
@kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager
@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
@kubectl -n kamaji-system apply -f postgresql.yaml
@while ! kubectl -n kamaji-system get secret postgresql-superuser > /dev/null 2>&1; do sleep 1; done

View File

@@ -1,74 +0,0 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine).
One of the implementations is [PostgreSQL](https://www.postgresql.org/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, a PostgreSQL instance must be deployed in advance into the local Kubernetes cluster in order to be used as storage for the tenants.
For the sake of simplicity, the [cloudnative-pg](https://cloudnative-pg.io/) Operator will be used to simplify its setup.
There is a Makefile to help with the process:
## Setup
```bash
$ make postgresql
```
This target will install `cloudnative-pg`, creating the PostgreSQL instance in the Kamaji Namespace, along with generating the required Secret resource for the kine integration.
This action is idempotent and doesn't overwrite values if they already exist.
```shell
namespace/cnpg-system unchanged
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/clusters.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/poolers.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/scheduledbackups.postgresql.cnpg.io configured
serviceaccount/cnpg-manager unchanged
clusterrole.rbac.authorization.k8s.io/cnpg-manager configured
clusterrolebinding.rbac.authorization.k8s.io/cnpg-manager-rolebinding unchanged
configmap/cnpg-default-monitoring unchanged
service/cnpg-webhook-service unchanged
deployment.apps/cnpg-controller-manager unchanged
mutatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-mutating-webhook-configuration configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-validating-webhook-configuration configured
deployment "cnpg-controller-manager" successfully rolled out
cluster.postgresql.cnpg.io/postgresql unchanged
secret/postgres-root-cert created
```
## Operator setup
```bash
$ make cnpg-setup
```
This target will apply all the required manifests, including the `cloudnative-pg` CRDs, the required RBAC, and the Deployment.
Release [v1.16.0](https://github.com/cloudnative-pg/cloudnative-pg/releases/tag/v1.16.0) has been tested successfully.
## SSL certificate Secret generation
```bash
$ make postgresql-secret
```
This target will download the `kubectl-cnpg` utility locally to generate the SSL certificate required to secure the connection to the PostgreSQL instance.
## Certificate generation
```bash
$ make postgresql-secret
```
Generate the Certificate required to connect to the DataStore.
## Teardown
```bash
$ make postgresql-destroy
```
This will lead to the deletion of the `cloudnative-pg` Operator, along with any instances and related secrets.
This action is idempotent.

View File

@@ -28,5 +28,5 @@ runcmd:
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- sudo apt update
- sudo apt install -y kubelet=1.23.5-00 kubeadm=1.23.5-00 kubectl=1.23.5-00
- sudo apt-mark hold kubelet kubeadm kubectl
- sudo apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
- sudo apt-mark hold kubelet kubeadm kubectl containerd

View File

@@ -1,13 +0,0 @@
# Kamaji documentation
- [Core Concepts](./concepts.md)
- [Architecture](./architecture.md)
- [Getting started](./getting-started-with-kamaji.md)
- Guides:
- [Deploy Kamaji](./kamaji-deployment-guide.md)
- [Deploy Kamaji on Azure](./kamaji-azure-deployment-guide.md)
- Deploy Kamaji on AWS
- Deploy Kamaji on GCP
- Deploy Kamaji on OpenStack
- [Reference](./reference.md)
- [Versioning](./versioning.md)

View File

@@ -1 +0,0 @@
# Kamaji architecture

View File

@@ -1,31 +0,0 @@
# Core Concepts
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` in short.
## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you would expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
## Save the state
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service, because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time. As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we deploy a multi-tenant `etcd` cluster storing the state of multiple tenant clusters.
With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that we have to operate an external `etcd` cluster, in addition to the `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its own data. Also, there are size limits in `etcd`, defaulted to 2GB and configurable to a maximum of 8GB. We're solving this issue by pooling multiple `etcd` clusters together and sharding the Tenant Control Planes.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` to save the state of the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration.
## Requirements of design
These are requirements of design behind Kamaji:
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
- Communication between different _“tenant clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
Goals and scope may vary as the project evolves.

docs/content/concepts.md Normal file
View File

@@ -0,0 +1,47 @@
# Concepts
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
These are requirements of the design behind Kamaji:
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
- Communication between different _“tenant clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
Goals and scope may vary as the project evolves.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` in short.
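For illustration only, a minimal `TenantControlPlane` could look like the sketch below; the field names are assumptions based on the samples shipped in this repository, so refer to the API reference for the authoritative schema:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00        # name of the tenant cluster
  namespace: default     # namespace of the admin cluster hosting the tcp pods
spec:
  controlPlane:
    deployment:
      replicas: 2        # HA via a regular Deployment, as described above
    service:
      serviceType: LoadBalancer  # NodePort/ClusterIP are viable alternatives
  kubernetes:
    version: v1.25.0     # Kubernetes version of the tenant cluster
```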
All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you would expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.
We have on the roadmap Cluster API support, as well as a Terraform provider, so that you can create _“tenant clusters”_ in a declarative way.
## Datastores
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service, because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time.
As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we can deploy a multi-tenant `etcd` datastore to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach to managing Tenant datastores. With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that we have to operate an external datastore, in addition to the `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its own data.
### Other storage drivers
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“tenant clusters”_.
### Pooling
By default, Kamaji expects to persist all the _“tenant clusters”_ data in a single datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that could have different resources assigned or a different tiering. Pooling of multiple datastores is an option you can leverage for a very large set of _“tenant clusters”_, so you can distribute the load properly. As a future improvement, we have a _datastore scheduler_ feature on the roadmap, so that Kamaji itself can automatically assign a _“tenant cluster”_ to the best datastore in the pool.
## Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.
This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes.
After worker nodes join the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker nodes traffic goes through these connections.
> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
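As a sketch of how this surfaces in the `TenantControlPlane` resource (the field names below are assumptions drawn from the repository samples; check the API reference before relying on them), the addon is toggled under `spec.addons`:
```yaml
spec:
  addons:
    konnectivity:      # when present, the konnectivity-server sidecar is deployed
      server:
        port: 8132     # matches the port exposed on the tcp service
    # omit the `konnectivity` key entirely to disable the service
```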

View File

@@ -0,0 +1,22 @@
# Governance
This document lays out the guidelines under which the Kamaji project will be governed.
The goal is to make sure that the roles and responsibilities are well defined and clarify how decisions are made.
## Roles
In the context of Kamaji project, we consider the following roles:
* __Users__: everyone using Kamaji, typically willing to provide feedback by proposing features and/or filing issues.
* __Contributors__: everyone contributing code, documentation, examples, tests, and participating in feature proposals as well as design discussions.
* __Maintainers__: responsible for engaging with and assisting contributors to iterate on the contributions until they reach acceptable quality. Maintainers can decide whether contributions are accepted into the project or rejected.
## Release Management
The release process will be governed by Maintainers.
## Roadmap Planning
Maintainers will share roadmap and release versions as milestones in GitHub [project's page](https://github.com/clastix/kamaji).

View File

@@ -0,0 +1,83 @@
# General
Thank you for your interest in contributing to Kamaji. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
## Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
1. You are working against the latest source on the *master* branch.
1. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
1. You open an issue to discuss any significant work: we would hate for your time to be wasted.
To send us a pull request, please:
1. Fork the repository.
1. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it
will be hard for us to focus on your change.
1. Ensure local tests pass.
1. Commit to your fork using clear commit messages.
1. Send us a pull request, answering any default questions in the pull request interface.
1. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
Make sure to keep Pull Requests small and functional to make them easier to review, understand, and look up in commit history. This repository uses "Squash and Commit" to keep our history clean and make it easier to revert changes based on a PR. Adding the appropriate documentation, unit tests, and e2e tests as part of a feature is the responsibility of the
feature owner, whether it is done in the same Pull Request or not. All Pull Requests must refer to an already open issue: this is the first step of contributing, and it also informs maintainers about the issue.
## Commits
A commit's first line should not exceed 50 columns.
A commit description is welcome to explain the changes further: just ensure you add a blank line first, followed by an arbitrary number of lines of at most 72 characters, with at most one blank line between them.
Please split changes into several small, documented commits: this will help us perform a better review. Commits must follow the Conventional Commits Specification, a lightweight convention on top of commit messages. It provides an easy set of rules for creating an explicit commit history, which makes it easier to write automated tools on top of it. This convention dovetails with Semantic Versioning by describing the features, fixes, and breaking changes made in commit messages. See the [Conventional Commits Specification](https://www.conventionalcommits.org) to learn about Conventional Commits.
> In case of errors or needed changes to previous commits, fix them by squashing, to keep changes atomic.
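As an illustrative example (the scope and subject below are made up), a well-formed commit message following this convention would be:
```
fix(docs): correct a typo in the contributing guide

Explain the change in free-form text here, wrapped at 72 columns,
after a single blank line below the subject.
```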
## Code convention
Kamaji is written in Golang. The changes must follow the Pull Request method, where a _GitHub Action_ will
run `golangci-lint`, so ensure your changes respect the coding standard.
### golint
You can easily check this by issuing the _Make_ recipe `golint`.
```
# make golint
golangci-lint run -c .golangci.yml
```
> Enabled linters and related options are defined in the [.golangci.yml file](https://github.com/clastix/Kamaji/blob/master/.golangci.yml)
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the
default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted'
and 'good first issue' issues is a great place to start.
## Design Docs
A contributor proposes a design with a PR on the repository to allow for revisions and discussions. If a design needs to be discussed before formulating a document for it, make use of GitHub Discussions to involve the community in the discussion.
## GitHub Issues
GitHub Issues are used to file bugs, work items, and feature requests with actionable items/issues.
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
* The version of the code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment
## Miscellanea
Please add a single trailing newline at the end of every file, as per the current coding style.
## Licensing
See the [LICENSE](https://github.com/clastix/Kamaji/blob/master/LICENSE) file for our project's licensing. We may ask you to confirm the licensing of your contribution.

View File

@@ -0,0 +1,2 @@
# Guidelines
Guidelines for community contributions.

View File

@@ -1,19 +1,19 @@
# Setup a minimal Kamaji for development
# Getting started
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development scopes. Please refer to the [Kamaji documentation](../README.md) for understanding all the terms used in this guide, as for example: `admin cluster` and `tenant control plane`.
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development scopes. Please refer to the [Kamaji documentation](concepts.md) to understand all the terms used in this guide, such as: `admin cluster`, `tenant cluster`, and `tenant control plane`.
## Pre-requisites
We assume you have installed on your workstation:
- [Docker](https://docs.docker.com/engine/install/)
- [Docker](https://docker.com)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl@v1.25.0](https://kubernetes.io/docs/tasks/tools/)
- [kubeadm@v1.25.0](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)
- [kubectl@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl](https://github.com/cloudflare/cfssl)
- [cfssljson](https://github.com/cloudflare/cfssl)
- [cfssl/cfssljson](https://github.com/cloudflare/cfssl)
> Starting from Kamaji v0.0.2, `kubectl` and `kubeadm` must be at least at version `v1.25.0`:
> this is required due to the changes introduced in the Kubernetes 1.25 release regarding the `kubelet-config` ConfigMap required for the node join.
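To verify that the tooling on your workstation meets this requirement, you can run:
```bash
kubeadm version -o short   # expect v1.25.0 or later
kubectl version --client   # expect v1.25.0 or later
```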
@@ -24,9 +24,9 @@ The instance of Kamaji is made of a single node hosting:
- admin control-plane
- admin worker
- multi-tenant etcd cluster
- multi-tenant datastore
### Standard
### Standard installation
You can install your KinD cluster, ETCD multi-tenant cluster and Kamaji operator with a **single command**:
@@ -38,29 +38,55 @@ Now you can [create your first `TenantControlPlane`](#deploy-tenant-control-plan
### Data store-specific
#### ETCD
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like `MySQL` or `PostgreSQL` compatible databases.
The multi-tenant etcd cluster is deployed as statefulset into the Kamaji node.
Run `make reqs` to setup Kamaji's requisites on KinD:
First, set up a KinD cluster:
```bash
$ make -C deploy/kind reqs
$ make -C deploy/kind kind
```
At this point you will have your KinD cluster up and running, and the ETCD cluster in multi-tenant mode.
#### ETCD
Deploy a multi-tenant `ETCD` cluster into the Kamaji node:
```bash
$ make -C deploy/kind etcd-cluster
```
Now you're ready to [install Kamaji operator](#install-kamaji).
#### Kine
#### MySQL
> The MySQL-compatible cluster provisioning is omitted here.
Deploy a MySQL/MariaDB backend into the Kamaji node:
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like MySQL or PostgreSQL compatible databases.
```bash
$ make -C deploy/kine/mysql mariadb
```
Read more about it in the provided [guide](../deploy/kine/README.md).
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a MySQL DataStore `config/samples/kamaji_v1alpha1_datastore_mysql.yaml` and make sure Kamaji uses the proper datastore name:
Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect to Kine and compatible database using the proper driver, you can now install it.
```
--datastore={.metadata.name}
```
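For example, the sample `DataStore` can be applied directly (the file path comes from this repository; adjust credentials and endpoints to your MariaDB deployment first):
```bash
$ kubectl apply -f config/samples/kamaji_v1alpha1_datastore_mysql.yaml
```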
Now you're ready to [install Kamaji operator](#install-kamaji).
#### PostgreSQL
Deploy a PostgreSQL backend into the Kamaji node:
```bash
$ make -C deploy/kine/postgresql postgresql
```
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a PostgreSQL DataStore `config/samples/kamaji_v1alpha1_datastore_postgresql.yaml` and make sure Kamaji uses the proper datastore name:
```
--datastore={.metadata.name}
```
Now you're ready to [install Kamaji operator](#install-kamaji).
### Install Kamaji
@@ -68,6 +94,8 @@ Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect t
$ kubectl apply -f config/install.yaml
```
> If you experience errors during the apply of the manifest, such as `resource mapping not found ... ensure CRDs are installed first`, just apply it again.
### Deploy Tenant Control Plane
Now it is time to deploy your first tenant control plane.
@@ -150,7 +178,7 @@ $ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/insta
### Join worker nodes
```bash
$ make kamaji-kind-worker-join
$ make -C deploy/kind kamaji-kind-worker-join
```
> To add more worker nodes, run the command above again.
@@ -163,12 +191,12 @@ NAME STATUS ROLES AGE VERSION
d2d4b468c9de Ready <none> 44s v1.23.4
```
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`.
The tenant control plane provisioning is now finished in a minimal Kamaji setup based on KinD. You can now develop, test, and run your own experiments with Kamaji.
## Cleanup
```bash
$ make destroy
$ make -C deploy/kind destroy
```

View File

@@ -0,0 +1,3 @@
# How to Guides
This section of the Kamaji documentation contains pages that show how to do a specific thing, typically by giving a sequence of steps.

View File

@@ -1,12 +1,19 @@
# Setup Kamaji on Azure
This guide will lead you through the process of creating a working Kamaji setup on MS Azure. It requires:
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.
- one bootstrap local workstation
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
The guide requires:
- one bootstrap workstation
- an AKS Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of Azure virtual machines to host `Tenant`s' workloads
## Summary
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -21,10 +28,10 @@ cd kamaji/deploy
We assume you have installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
Make sure you have a valid Azure subscription, and log in to Azure:
@@ -47,20 +54,37 @@ az group create \
--name $KAMAJI_RG \
--location $KAMAJI_REGION
az network vnet create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_VNET_NAME \
--location $KAMAJI_REGION \
--address-prefix $KAMAJI_VNET_ADDRESS
az network vnet subnet create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_SUBNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--address-prefixes $KAMAJI_SUBNET_ADDRESS
KAMAJI_SUBNET_ID=$(az network vnet subnet show \
--resource-group ${KAMAJI_RG} \
--vnet-name ${KAMAJI_VNET_NAME} \
--name ${KAMAJI_SUBNET_NAME} \
--query id --output tsv)
az aks create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_CLUSTER \
--location $KAMAJI_REGION \
--vnet-subnet-id $KAMAJI_SUBNET_ID \
--zones 1 2 3 \
--node-count 3 \
--nodepool-name $KAMAJI_CLUSTER \
--ssh-key-value @~/.ssh/id_rsa.pub \
--no-wait
--nodepool-name $KAMAJI_CLUSTER
```
Once the cluster formation succeeds, get credentials to access the cluster as admin:
```
```bash
az aks get-credentials \
--resource-group $KAMAJI_RG \
--name $KAMAJI_CLUSTER
@@ -68,41 +92,41 @@ az aks get-credentials \
And check you can access:
```
```bash
kubectl cluster-info
```
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters.
Install a multi-tenant `etcd` in the admin cluster as three replicas StatefulSet with data persistence.
The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`.
However, an externally managed `etcd` is highly recommended.
If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL or PostgreSQL compatible database, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/README.md).
### Install with Helm Chart
Install with the `helm` in a dedicated namespace of the Admin cluster:
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a three-replica StatefulSet with data persistence:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```
The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as a MySQL- or PostgreSQL-compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as datastore:
```bash
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 120m
etcd-1 1/1 Running 0 120m
etcd-2 1/1 Running 0 119m
kamaji-857fcdf599-4fb2p 2/2 Running 0 120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
You just turned your AKS cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:
```
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Azure Kubernetes AKS cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.
## Create Tenant Cluster
@@ -135,17 +159,17 @@ spec:
resources:
apiServer:
requests:
cpu: 500m
cpu: 250m
memory: 512Mi
limits: {}
controllerManager:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
scheduler:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
service:
@@ -198,31 +222,30 @@ spec:
type: LoadBalancer
EOF
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
Make sure:
- the following annotation: `service.beta.kubernetes.io/azure-load-balancer-internal=true` is set on the `tcp` service. It tells Azure to expose the service within an internal loadbalancer.
- the following annotation: `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
- the following annotation: `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with the public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
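As a quick sanity check (a sketch reusing the variables defined earlier in this guide), you can print the annotations of the `tcp` service:
```bash
kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} \
  -o jsonpath='{.metadata.annotations}'
```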
### Working with Tenant Control Plane
Check the access to the Tenant Control Plane:
```
```bash
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/healthz
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/version
```
Let's retrieve the `kubeconfig` in order to work with it:
```
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d \
| base64 --decode \
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \
@@ -248,118 +271,65 @@ NAME ENDPOINTS AGE
kubernetes 10.240.0.100:6443 57m
```
### Prepare the Infrastructure for the Tenant virtual machines
Kamaji provides Control Plane as a Service, so the tenant user can join his own virtual machines as worker nodes. Each tenant can place his virtual machines in a dedicated Azure virtual network.
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, such as Terraform.
Prepare the Tenant infrastructure:
```
az group create \
--name $TENANT_RG \
--location $KAMAJI_REGION
az network nsg create \
--resource-group $TENANT_RG \
--name $TENANT_NSG
az network nsg rule create \
--resource-group $TENANT_RG \
--nsg-name $TENANT_NSG \
--name $TENANT_NSG-ssh \
--protocol tcp \
--priority 1000 \
--destination-port-range 22 \
--access allow
az network vnet create \
--resource-group $TENANT_RG \
--name $TENANT_VNET_NAME \
--address-prefix $TENANT_VNET_ADDRESS \
--subnet-name $TENANT_SUBNET_NAME \
--subnet-prefix $TENANT_SUBNET_ADDRESS
az network vnet subnet create \
--resource-group $TENANT_RG \
--vnet-name $TENANT_VNET_NAME \
--name $TENANT_SUBNET_NAME \
--address-prefixes $TENANT_SUBNET_ADDRESS \
--network-security-group $TENANT_NSG
```
Connection between the Tenant virtual network and the Kamaji AKS virtual network leverages on the [Azure Network Peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview).
Enable the network peering between the Tenant Virtual Network and the Kamaji AKS Virtual Network:
Create an Azure Virtual Machine Scale Set to host worker nodes
```bash
KAMAJI_VNET_NAME=`az network vnet list -g $KAMAJI_NODE_RG --query [].name --out tsv`
KAMAJI_VNET_ID=`az network vnet list -g $KAMAJI_NODE_RG --query [].id --out tsv`
TENANT_VNET_ID=`az network vnet list -g $TENANT_RG --query [].id --out tsv`
az network vnet peering create \
--resource-group $TENANT_RG \
--name $TENANT_NAME-$KAMAJI_CLUSTER \
--vnet-name $TENANT_VNET_NAME \
--remote-vnet $KAMAJI_VNET_ID \
--allow-vnet-access
az network vnet peering create \
--resource-group $KAMAJI_NODE_RG \
--name $KAMAJI_CLUSTER-$TENANT_NAME \
az network vnet subnet create \
--resource-group $KAMAJI_RG \
--name $TENANT_SUBNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--remote-vnet $TENANT_VNET_ID \
--allow-vnet-access
```
--address-prefixes $TENANT_SUBNET_ADDRESS
[Azure Network Security Groups](https://docs.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) can be used to control the traffic between the Tenant virtual network and the Kamaji AKS virtual network for a stronger isolation. See the required [ports and protocols](https://kubernetes.io/docs/reference/ports-and-protocols/) between Kubernetes control plane and worker nodes.
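For example, a single rule allowing kubelet traffic on port `10250` could look like the sketch below; it reuses this guide's variables, and the full rule set depends on your isolation requirements:
```bash
az network nsg rule create \
  --resource-group $KAMAJI_RG \
  --nsg-name $TENANT_NSG \
  --name $TENANT_NSG-kubelet \
  --protocol tcp \
  --priority 1010 \
  --destination-port-range 10250 \
  --access allow
```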
### Create the tenant virtual machines
Create an Azure VM Stateful Set to host virtual machines
```
az vmss create \
--name $TENANT_VMSS \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--image $TENANT_VM_IMAGE \
--public-ip-per-vm \
--vnet-name $TENANT_VNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--subnet $TENANT_SUBNET_NAME \
--ssh-key-value @~/.ssh/id_rsa.pub \
--computer-name-prefix $TENANT_NAME- \
--nsg $TENANT_NSG \
--custom-data ./tenant-cloudinit.yaml \
--instance-count 0
--load-balancer "" \
--instance-count 0
az vmss update \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--set virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].enableIPForwarding=true
az vmss scale \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--new-capacity 3
```
### Join the tenant virtual machines to the tenant control plane
The current approach for joining nodes is to use the `kubeadm` one therefore, we will create a bootstrap token to perform the action:
### Join worker nodes
The current approach for joining nodes is to use `kubeadm`, and therefore we will create a bootstrap token to perform the action. To simplify the step, we will store the entire join command in a variable:
```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r '.spec.loadBalancerIP')
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
```
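Before running it across the scale set, you may want to eyeball the generated command:
```bash
echo "${JOIN_CMD}"
```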
A bash loop will be used to join all the available nodes.
```bash
HOSTS=($(az vmss list-instance-public-ips \
--resource-group $TENANT_RG \
--name $TENANT_VMSS \
--query "[].ipAddress" \
--output tsv))
VMIDS=($(az vmss list-instances \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--query [].instanceId \
--output tsv))
for i in ${!HOSTS[@]}; do
HOST=${HOSTS[$i]}
echo $HOST
ssh ${USER}@${HOST} -t ${JOIN_CMD};
for i in ${!VMIDS[@]}; do
VMID=${VMIDS[$i]}
az vmss run-command create \
--name join-tenant-control-plane \
--vmss-name $TENANT_VMSS \
--resource-group $KAMAJI_RG \
--instance-id ${VMID} \
--script "${JOIN_CMD}"
done
```
@@ -369,51 +339,43 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-000000 NotReady <none> 112s v1.23.5
tenant-00-000002 NotReady <none> 92s v1.23.5
tenant-00-000003 NotReady <none> 71s v1.23.5
tenant-00-000000 NotReady <none> 112s v1.25.0
tenant-00-000002 NotReady <none> 92s v1.25.0
tenant-00-000003 NotReady <none> 71s v1.25.0
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use a CNI of your choice.
Download the latest stable Calico manifest:
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico-azure.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```
And after a while, `kube-system` pods will be running.
As per [documentation](https://projectcalico.docs.tigera.io/reference/public-cloud/azure), Calico in VXLAN mode is supported on Azure while IPIP packets are blocked by the Azure network fabric. Make sure you edit the manifest above and set the following variables:
- `CLUSTER_TYPE="k8s"`
- `CALICO_IPV4POOL_IPIP="Never"`
- `CALICO_IPV4POOL_VXLAN="Always"`
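One possible way to double-check the edited manifest before applying it (a grep sketch; adapt it to however you edited the file):
```bash
grep -n -E -A1 'CLUSTER_TYPE|CALICO_IPV4POOL_(IPIP|VXLAN)' calico.yaml
```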
Apply to the tenant cluster:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
calico-node-kxf6n 1/1 Running 0 3m
calico-node-qtdlw 1/1 Running 0 3m
coredns-64897985d-2v5lc 1/1 Running 0 5m
coredns-64897985d-nq276 1/1 Running 0 5m
kube-proxy-cwdww 1/1 Running 0 3m
kube-proxy-m48v4 1/1 Running 0 3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```
And the nodes will be ready
And after a while, the nodes will be ready:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-000000 Ready <none> 3m38s v1.23.5
tenant-00-000002 Ready <none> 3m18s v1.23.5
tenant-00-000003 Ready <none> 2m57s v1.23.5
tenant-00-000000 Ready <none> 3m38s v1.25.0
tenant-00-000002 Ready <none> 3m18s v1.25.0
tenant-00-000003 Ready <none> 2m57s v1.25.0
```
## Cleanup
To get rid of the Tenant infrastructure, remove the tenant resource group:
```
az group delete --name $TENANT_RG --yes --no-wait
```
To get rid of the Kamaji infrastructure, remove the Kamaji resource group:
```
az group delete --name $KAMAJI_RG --yes --no-wait
```

View File

@@ -1,14 +1,19 @@
# Setup Kamaji
This guide will lead you through the process of creating a working Kamaji setup on a generic Kubernetes cluster. It requires:
# Setup Kamaji on a generic infrastructure
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure, either virtual or bare metal.
- one bootstrap local workstation
- a Kubernetes cluster 1.22+, to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host Tenants' workloads
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, we encourage you to build suitable, auditable GitOps deployment processes around your final infrastructure.
> In this guide, we assume the machines are running `Ubuntu 20.04`.
The guide requires:
- one bootstrap workstation
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
## Summary
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -23,10 +28,10 @@ cd kamaji/deploy
We assume you have installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of the whole Kamaji setup, including all Tenant clusters.
@@ -37,42 +42,46 @@ Throughout the following instructions, shell variables are used to indicate valu
source kamaji.env
```
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide at least:
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenants' `etcd`.
- CSI module installed with a Storage Class for the Tenants' `etcd`. Local Persistent Volumes are an option.
- Support for LoadBalancer Service Type, or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Monitoring Stack, eg. [Prometheus](https://github.com/prometheus-community).
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster.
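A quick, non-exhaustive sanity check could look like this:
```bash
# Verify admin-level permissions and the presence of a Storage Class.
kubectl auth can-i '*' '*' --all-namespaces
kubectl get storageclass
```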
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. Install a multi-tenant `etcd` in the admin cluster as three replicas StatefulSet with data persistence. The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`. However, an externally managed `etcd` is highly recommended. If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL compatible database, thanks to the [kine](https://github.com/k3s-io/kine) integration [here](../deploy/kine/mysql/README.md).
### Install with Helm Chart
Install with the `helm` in a dedicated namespace of the Admin cluster:
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a three-replica StatefulSet with data persistence:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```
The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as a MySQL- or PostgreSQL-compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as datastore:
```bash
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 120m
etcd-1 1/1 Running 0 120m
etcd-2 1/1 Running 0 119m
kamaji-857fcdf599-4fb2p 2/2 Running 0 120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
You just turned your Kubernetes cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Kubernetes cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.
## Create Tenant Cluster
@@ -101,17 +110,17 @@ spec:
resources:
apiServer:
requests:
cpu: 500m
cpu: 250m
memory: 512Mi
limits: {}
controllerManager:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
scheduler:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
service:
@@ -146,8 +155,7 @@ spec:
limits: {}
EOF
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
After a few minutes, check the created resources in the tenant's namespace; when ready, it will look similar to the following:
@@ -171,39 +179,8 @@ service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/T
The regular Tenant Control Plane containers (`kube-apiserver`, `kube-controller-manager`, `kube-scheduler`) run unchanged in the `tcp` pods instead of on dedicated machines, and they are exposed through a service on port `6443` of the worker nodes in the Admin cluster.
```yaml
apiVersion: v1
kind: Service
metadata:
name: tenant-00
spec:
clusterIP: 10.32.233.177
loadBalancerIP: 192.168.32.240
ports:
- name: kube-apiserver
nodePort: 31073
port: 6443
protocol: TCP
targetPort: 6443
- name: konnectivity-server
nodePort: 32125
port: 8132
protocol: TCP
targetPort: 8132
selector:
kamaji.clastix.io/soot: tenant-00
type: LoadBalancer
```
The `LoadBalancer` service type is used to expose the Tenant Control Plane. However, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case. High Availability and rolling updates of the Tenant Control Plane are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
### Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.
This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes. After worker nodes have joined the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. With the Konnectivity service enabled, all control plane to worker nodes traffic goes through these connections.
> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
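To see the sidecar in place, a quick check (a sketch based on the service example above, whose label selector is reused here):
```bash
# Expect konnectivity-server among the containers of the tcp pod.
kubectl -n ${TENANT_NAMESPACE} get pods -l kamaji.clastix.io/soot=${TENANT_NAME} \
  -o jsonpath='{.items[0].spec.containers[*].name}'
```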
### Working with Tenant Control Plane
Collect the external IP address of the `tcp` service:
@@ -224,7 +201,7 @@ The `kubeconfig` required to access the Tenant Control Plane is stored in a secr
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d \
| base64 --decode \
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
@@ -255,38 +232,36 @@ kubernetes 192.168.32.240:6443 18m
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
### Preparing Worker Nodes to join
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`. In the future, we'll provide integration with Cluster APIs and other IaC tools.
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, such as Terraform.
Use bash script `nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
You can use the provided helper script `/deploy/nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
> Warning: we assume worker nodes are machines running `Ubuntu 20.04`
> Warning: the script assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
Run the installation script:
Run the script:
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
```
### Join Command
The current approach for joining nodes is to use the kubeadm one therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire command of joining in a variable.
### Join worker nodes
The current approach for joining nodes is to use `kubeadm`, and therefore we will create a bootstrap token to perform the action. To simplify the step, we will store the entire join command in a variable:
```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
```
### Adding Worker Nodes
A bash loop will be used to join all the available nodes.
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t ${JOIN_CMD};
@@ -299,163 +274,42 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-worker-00 NotReady <none> 25s v1.23.5
tenant-00-worker-01 NotReady <none> 17s v1.23.5
tenant-00-worker-02 NotReady <none> 9s v1.23.5
tenant-00-worker-00 NotReady <none> 25s v1.25.0
tenant-00-worker-01 NotReady <none> 17s v1.25.0
tenant-00-worker-02 NotReady <none> 9s v1.25.0
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use a CNI of your choice.
Download the latest stable Calico manifest:
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```
And after a while, `kube-system` pods will be running.
Before applying the Calico manifest, you can customize it as necessary.
Apply to the tenant cluster:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
calico-node-kxf6n 1/1 Running 0 3m
calico-node-qtdlw 1/1 Running 0 3m
coredns-64897985d-2v5lc 1/1 Running 0 5m
coredns-64897985d-nq276 1/1 Running 0 5m
kube-proxy-cwdww 1/1 Running 0 3m
kube-proxy-m48v4 1/1 Running 0 3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```
And the nodes will be ready
And after a while, the nodes will be ready:
```bash
kubectl get nodes --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-worker-00 Ready <none> 2m48s v1.23.5
tenant-00-worker-01 Ready <none> 2m40s v1.23.5
tenant-00-worker-02 Ready <none> 2m32s v1.23.5
```
## Smoke test
The tenant cluster is now ready to accept workloads.
Export its `kubeconfig` file
```bash
export KUBECONFIG=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
#### Deployment
Deploy a `nginx` application on the tenant cluster
```bash
kubectl create deployment nginx --image=nginx
```
and check the `nginx` pod gets scheduled
```bash
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx-6799fc88d8-4sgcb 1/1 Running 0 33s 172.12.121.1 worker02
```
#### Port Forwarding
Verify the ability to access applications remotely using port forwarding.
Retrieve the full name of the `nginx` pod:
```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Forward port 8080 on your local machine to port 80 of the `nginx` pod:
```bash
kubectl port-forward $POD_NAME 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```
In a new terminal make an HTTP request using the forwarding address:
```bash
curl --head http://127.0.0.1:8080
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 08:19:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
Switch back to the previous terminal and stop the port forwarding to the `nginx` pod.
#### Logs
Verify the ability to retrieve container logs.
Print the `nginx` pod logs:
```bash
kubectl logs $POD_NAME
...
127.0.0.1 - - [19/Jun/2021:08:19:01 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.68.0" "-"
```
#### Kubelet tunnel
Verify the ability to execute commands in a container.
Print the `nginx` version by executing the `nginx -v` command in the `nginx` container:
```bash
kubectl exec -ti $POD_NAME -- nginx -v
nginx version: nginx/1.21.0
```
#### Services
Verify the ability to expose applications using a service.
Expose the `nginx` deployment using a `NodePort` service:
```bash
kubectl expose deployment nginx --port 80 --type NodePort
```
Retrieve the node port assigned to the `nginx` service:
```bash
NODE_PORT=$(kubectl get svc nginx \
--output=jsonpath='{range .spec.ports[0]}{.nodePort}')
```
Retrieve the IP address of a worker instance and make an HTTP request:
```bash
curl -I http://${WORKER0}:${NODE_PORT}
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 09:29:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
tenant-00-worker-00 Ready <none> 2m48s v1.25.0
tenant-00-worker-01 Ready <none> 2m40s v1.25.0
tenant-00-worker-02 Ready <none> 2m32s v1.25.0
```
## Cleanup
Remove the worker nodes joined the tenant control plane
```bash
kubectl delete nodes --all --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
```
For each worker node, log in and clean it
@@ -475,3 +329,5 @@ Delete the tenant control plane from kamaji
```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
That's all folks!

View File

@@ -0,0 +1,5 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to the [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [MySQL](https://www.mysql.com/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.

View File

@@ -0,0 +1,6 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
One of the implementations is [PostgreSQL](https://www.postgresql.org/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.

View File

@@ -0,0 +1,13 @@
# Tenant Cluster Upgrade
The process of upgrading a _“tenant cluster”_ consists of two steps:
1. Upgrade the Tenant Control Plane
2. Upgrade the Tenant Worker Nodes
## Upgrade of Tenant Control Plane
You should patch the `TenantControlPlane` custom resource by setting `spec.kubernetes.version` to a new compatible value, according to the [Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/).
> Note: during the upgrade, a new ReplicaSet of Tenant Control Plane pods will be created, so make sure you have at least two pods to avoid service disruption.
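For example, a minimal patch could look like this (a sketch; the name and namespace variables follow the convention used in the deployment guides):
```bash
kubectl -n ${TENANT_NAMESPACE} patch tcp ${TENANT_NAME} --type=merge \
  -p '{"spec":{"kubernetes":{"version":"v1.25.0"}}}'
```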
## Upgrade of Tenant Worker Nodes
Since Kamaji currently does not provide any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example with the help of `kubeadm`. Cluster API support is on the roadmap, so that you will be able to upgrade _“tenant clusters”_ in a fully declarative way.

View File

Image file changed (189 KiB before and after).

View File

Image file changed (184 KiB before and after).

51
docs/content/index.md Normal file
View File

@@ -0,0 +1,51 @@
# Kamaji
**Kamaji** deploys and operates Kubernetes at scale with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
![Architecture](images/kamaji-light.png#gh-light-mode-only)
![Architecture](images/kamaji-dark.png#gh-dark-mode-only)
All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
<p align="center" style="padding: 6px 6px">
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="200" />
</p>
## Features
- **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Fully API compliant:** tenant clusters are fully CNCF compliant, built with upstream Kubernetes binaries. A user does not see any difference between a Kamaji-provisioned cluster and a dedicated cluster.
## Getting started
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji on [KinD](https://kind.sigs.k8s.io/).
## Open Source
Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants, keeping tenant namespaces isolated by policies. While that solution is the right choice for balancing features and ease of use, there are cases where a tenant user requires access to the control plane, for example when a tenant needs to manage CRDs on their own. With Kamaji, you can provide cluster admin permissions to the tenant.
Q. Well, you convinced me, how can I give it a try?
A. It is possible to get started with Kamaji on a laptop with [KinD](getting-started.md) installed.

File diff suppressed because it is too large

View File

@@ -39,76 +39,3 @@ Available environment variables are:
| `KAMAJI_HEALTH_PROBE_BIND_ADDRESS` | The address the probe endpoint binds to. (default ":8081") |
| `KAMAJI_LEADER_ELECTION` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. |
| `KAMAJI_TMP_DIRECTORY` | Directory which will be used to work with temporary files. (default "/tmp/kamaji") |
## Build and deploy
Clone the repo on your workstation.
```bash
## Install dependencies
$ go mod tidy
## Generate code
$ make generate
## Generate Manifests
$ make manifests
## Install Manifests
$ make install
## Build Docker Image
$ IMG=<image name and tag> make docker-build
## Push Docker Image
$ IMG=<image name and tag> make docker-push
## Deploy Kamaji
$ IMG=<image name and tag> make deploy
## YAML Installation File
$ make yaml-installation-file
```
It will generate a YAML installation file at `config/install.yaml`. It should be customized accordingly.
## Tenant Control Planes
**Kamaji** offers a [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing tenant control planes. This *CRD* is called `TenantControlPlane`, or `tcp` in short. Use the command `kubectl explain tcp.spec` to understand the fields and their usage.
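For example:
```bash
# Inspect the TenantControlPlane schema from the admin cluster.
kubectl explain tcp.spec
kubectl explain tcp.spec.addons
```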
### Add-ons
**Kamaji** provides optional installations into the deployed tenant control plane through add-ons. It is possible to enable/disable them through the `tcp` definition.
### Core DNS
```yaml
addons:
coreDNS: {}
```
### Kube-Proxy
```yaml
addons:
kubeProxy: {}
```
### Konnectivity
```yaml
addons:
konnectivity:
proxyPort: 31132 # mandatory
version: v0.0.32
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
serverImage: registry.k8s.io/kas-network-proxy/proxy-server
agentImage: registry.k8s.io/kas-network-proxy/proxy-agent
```

View File

@@ -0,0 +1,70 @@
# Conformance
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation (CNCF) provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).
The standard set of conformance tests is currently the one defined by the `[Conformance]` tag in the [kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.
All the _“tenant clusters”_ built with Kamaji are CNCF conformant:
- [v1.23](https://github.com/cncf/k8s-conformance/pull/2194)
- [v1.24](https://github.com/cncf/k8s-conformance/pull/2193)
- [v1.25](https://github.com/cncf/k8s-conformance/pull/2188)
<p align="left" style="padding: 6px 6px">
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="100" />
</p>
## Running the conformance tests
The standard tool for running CNCF conformance tests is [Sonobuoy](https://github.com/vmware-tanzu/sonobuoy). Sonobuoy is regularly built and kept up to date to execute against all currently supported versions of Kubernetes.
Download a [binary release](https://github.com/vmware-tanzu/sonobuoy/releases) of the CLI.
Make sure to access your tenant cluster:
```
export KUBECONFIG=tenant.kubeconfig
```
Deploy a Sonobuoy pod to your tenant cluster with:
```
sonobuoy run --mode=certified-conformance
```
> You can run the command synchronously by adding the flag `--wait` but be aware that running the conformance tests can take an hour or more.
View actively running pods:
```
sonobuoy status
```
To inspect the logs:
```
sonobuoy logs -f
```
Once `sonobuoy status` shows the run as `completed`, copy the output directory from the main Sonobuoy pod to a local directory:
```
outfile=$(sonobuoy retrieve)
```
This copies a single `.tar.gz` snapshot from the Sonobuoy pod into your local
`.` directory. Extract the contents into `./results` with:
```
mkdir ./results; tar xzf $outfile -C ./results
```
To clean up Kubernetes objects created by Sonobuoy, run:
```
sonobuoy delete
```

View File

@@ -0,0 +1,2 @@
# Reference
This section of the Kamaji documentation contains references to the project's specifications.

View File

@@ -1,8 +1,10 @@
# Versioning and support
# Versioning
In Kamaji, there are different components that might require independent versioning and support levels:
|Kamaji|Admin Cluster|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0+ |1.21.0 |1.23.x |0.0.32 |3.5.4 |
|0.0.1 |1.22.0+ |1.21.0 |1.23.5 |0.0.31 |3.5.4 |
|0.0.2 |1.22.0+ |1.21.0 |1.25.0 |0.0.32 |3.5.4 |
Other combinations might work but have not been tested.
Other combinations might work but they have not been tested yet.

11
docs/content/use-cases.md Normal file
View File

@@ -0,0 +1,11 @@
# Use Cases
The Kamaji project was initially started as a solution to real and common problems, such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it opens up a wider range of use cases.
Here are a few:
- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize clusters management, monitoring, and observability by leaving developers to focus on applications, increase productivity and reduce operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures with the option of Bring Your Own Device, BYOD.
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespaces based isolation often still need access to cluster wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** check new Kubernetes API or experimental flag or a new tool without impacting production operations. Kamaji will let you simulate such things in a safe and controlled environment.
- **Workloads Testing:** easily check the behaviour of your workloads on multiple versions of Kubernetes by deploying multiple Control Planes in a single cluster.

61
docs/mkdocs.yml Normal file
View File

@@ -0,0 +1,61 @@
site_name: Kamaji
repo_name: clastix/kamaji
repo_url: https://github.com/clastix/kamaji
site_url: https://kamaji.clastix.io/
docs_dir: content
site_dir: site
theme:
name: material
features:
- navigation.tabs
- navigation.tabs.sticky
- navigation.instant
- navigation.sections
include_sidebar: true
palette:
# Palette toggle for automatic mode
- media: "(prefers-color-scheme)"
toggle:
icon: material/brightness-auto
name: Switch to light mode
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
toggle:
icon: material/lightbulb
name: Switch to dark mode
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
toggle:
icon: material/lightbulb-outline
name: Switch to system preference
# Generate navigation bar
nav:
- 'Kamaji': index.md
- 'Getting started': getting-started.md
- 'Concepts': concepts.md
- 'Guides':
- guides/index.md
- guides/kamaji-deployment-guide.md
- guides/kamaji-azure-deployment-guide.md
- guides/postgresql-datastore.md
- guides/mysql-datastore.md
- guides/upgrade.md
- 'Use Cases': use-cases.md
- 'Reference':
- reference/index.md
- reference/configuration.md
- reference/conformance.md
- reference/versioning.md
- reference/api.md
- 'Contribute':
- contribute/index.md
- contribute/guidelines.md
- contribute/governance.md

2
docs/requirements.txt Normal file
View File

@@ -0,0 +1,2 @@
mkdocs>=1.3.0
mkdocs-material>=8.2.8

1
docs/runtime.txt Normal file
View File

@@ -0,0 +1 @@
3.8

94
docs/templates/reference-cr.tmpl vendored Normal file
View File

@@ -0,0 +1,94 @@
# API Reference
Packages:
{{range .Groups}}
- [{{.Group}}/{{.Version}}](#{{ anchorize (printf "%s/%s" .Group .Version) }})
{{- end -}}{{/* range .Groups */}}
{{- range .Groups }}
{{- $group := . }}
# {{.Group}}/{{.Version}}
Resource Types:
{{range .Kinds}}
- [{{.Name}}](#{{ anchorize .Name }})
{{end}}{{/* range .Kinds */}}
{{range .Kinds}}
{{$kind := .}}
## {{.Name}}
{{range .Types}}
{{if not .IsTopLevel}}
### {{.Name}}
{{end}}
{{.Description}}
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody>
{{- if .IsTopLevel -}}
<tr>
<td><b>apiVersion</b></td>
<td>string</td>
<td>{{$group.Group}}/{{$group.Version}}</td>
<td>true</td>
</tr>
<tr>
<td><b>kind</b></td>
<td>string</td>
<td>{{$kind.Name}}</td>
<td>true</td>
</tr>
<tr>
<td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta">metadata</a></b></td>
<td>object</td>
<td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
<td>true</td>
</tr>
{{- end -}}
{{- range .Fields -}}
<tr>
<td><b>{{if .TypeKey}}<a href="#{{.TypeKey}}">{{.Name}}</a>{{else}}{{.Name}}{{end}}</b></td>
<td>{{.Type}}</td>
<td>
{{.Description}}<br/>
{{- if or .Schema.Format .Schema.Enum .Schema.Default .Schema.Minimum .Schema.Maximum }}
<br/>
{{- end}}
{{- if .Schema.Format }}
<i>Format</i>: {{ .Schema.Format }}<br/>
{{- end }}
{{- if .Schema.Enum }}
<i>Enum</i>: {{ .Schema.Enum | toStrings | join ", " }}<br/>
{{- end }}
{{- if .Schema.Default }}
<i>Default</i>: {{ .Schema.Default }}<br/>
{{- end }}
{{- if .Schema.Minimum }}
<i>Minimum</i>: {{ .Schema.Minimum }}<br/>
{{- end }}
{{- if .Schema.Maximum }}
<i>Maximum</i>: {{ .Schema.Maximum }}<br/>
{{- end }}
</td>
<td>{{.Required}}</td>
</tr>
{{- end -}}
</tbody>
</table>
{{- end}}{{/* range .Types */}}
{{- end}}{{/* range .Kinds */}}
{{- end}}{{/* range .Groups */}}

View File

@@ -818,3 +818,15 @@ func (d *Deployment) ResetKubeAPIServerFlags(resource *appsv1.Deployment, tcp *k
resource.GetAnnotations()[apiServerFlagsAnnotation] = fmt.Sprintf("%d", len(tcp.Spec.ControlPlane.Deployment.ExtraArgs.APIServer))
}
// SetNodeSelector propagates the node selector from the TenantControlPlane spec to the control plane pod spec.
func (d *Deployment) SetNodeSelector(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.NodeSelector = tcp.Spec.ControlPlane.Deployment.NodeSelector
}
// SetToleration propagates the pod tolerations from the TenantControlPlane spec to the control plane pod spec.
func (d *Deployment) SetToleration(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.Tolerations = tcp.Spec.ControlPlane.Deployment.Tolerations
}
// SetAffinity propagates the affinity rules from the TenantControlPlane spec to the control plane pod spec.
func (d *Deployment) SetAffinity(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.Affinity = tcp.Spec.ControlPlane.Deployment.Affinity
}

View File

@@ -79,6 +79,9 @@ func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControl
d.SetLabels(r.resource, utilities.MergeMaps(utilities.CommonLabels(tenantControlPlane.GetName()), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
d.SetAnnotations(r.resource, utilities.MergeMaps(r.resource.Annotations, tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Annotations))
d.SetTemplateLabels(&r.resource.Spec.Template, r.deploymentTemplateLabels(ctx, tenantControlPlane))
d.SetNodeSelector(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetToleration(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetAffinity(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetStrategy(&r.resource.Spec)
d.SetSelector(&r.resource.Spec, tenantControlPlane)
d.SetTopologySpreadConstraints(&r.resource.Spec, tenantControlPlane.Spec.ControlPlane.Deployment.TopologySpreadConstraints)