Compare commits

..

20 Commits

Author SHA1 Message Date
Dario Tranchitella
8be787adc5 reorg: marking loadbalancer errors as debug 2022-05-29 16:41:39 +00:00
Adriano Pezzuto
6b452ccd40 Update documentation with latest changes (#44)
* docs: set the kind guide as getting started
* docs: update guides to latest changes
* docs: minor add to README
2022-05-29 17:17:46 +02:00
alegrey91
0dbd73691c chore(ci): adding e2e github action 2022-05-28 09:59:13 +00:00
alegrey91
f57abb0d2e test(e2e): add test suite for e2e 2022-05-28 09:59:13 +00:00
Dario Tranchitella
2f76841753 fix(helm): missing crd update upon addons feature 2022-05-28 09:47:13 +00:00
ptx96
6cebf0950d feat(docker): build according to targetarch 2022-05-26 16:13:39 +00:00
ptx96
6ee5196928 ci(docker): added docker build and push job 2022-05-26 16:13:39 +00:00
Pietro Terrizzi
261aafbb07 docs(kind): fixed URL for kamaji develop deploy 2022-05-26 15:27:47 +00:00
mendrugory
de690a4039 chore(helm): addons 2022-05-26 14:58:05 +02:00
mendrugory
258b1ff48f feat: addons 2022-05-26 10:16:02 +02:00
Dario Tranchitella
1d64932265 docs: pointing to k8s slack channel 2022-05-25 13:51:10 +00:00
Dario Tranchitella
321a955fdb reorg: scaffolding for e2e test suite 2022-05-25 08:35:31 +00:00
Dario Tranchitella
ba09748a5b chore(helm): aligning crd to latest code generation tool 2022-05-23 15:13:01 +00:00
Dario Tranchitella
a198c21987 chore(yaml): aligning crd to latest code generation tool 2022-05-23 15:13:01 +00:00
Dario Tranchitella
3279a0e617 build(dockerfile): upgrade to go 1.18 2022-05-23 15:13:01 +00:00
Dario Tranchitella
243dbddc69 chore(ci): upgrade to go 1.18 2022-05-23 15:13:01 +00:00
Dario Tranchitella
6d7d900ac2 chore: upgrade to go 1.18 2022-05-23 15:13:01 +00:00
Dario Tranchitella
6bf838204d chore: using go install rather than go get 2022-05-23 15:13:01 +00:00
Dario Tranchitella
c0e718cb07 chore: using docker hub as default container registry 2022-05-23 13:47:48 +00:00
Dario Tranchitella
0be08a0099 reorg: upgrade phase as first resource
Ensuring that upon a Tenant Control Plane upgrade the additional printer
columns are reporting a coherent status.
2022-05-23 10:19:43 +00:00
54 changed files with 2831 additions and 2045 deletions

View File

@@ -12,6 +12,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.18'
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
@@ -27,7 +30,7 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@v2
with:
go-version: '1.17'
go-version: '1.18'
- run: make yaml-installation-file
- name: Checking if YAML installer file is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi

74
.github/workflows/docker-ci.yml vendored Normal file
View File

@@ -0,0 +1,74 @@
# docker-ci: builds and pushes multi-arch Kamaji images on version tags.
name: docker-ci
on:
  push:
    tags:
      - "v*"
jobs:
  docker-ci:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          # Publish the same image to both registries under the repo name.
          images: |
            quay.io/${{ github.repository }}
            docker.io/${{ github.repository }}
          tags: |
            type=semver,pattern={{raw}}
          # Do not move the `latest` tag automatically on tagged releases.
          flavor: |
            latest=false
      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64,arm
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          install: true
      - name: Inspect builder
        run: |
          echo "Name: ${{ steps.buildx.outputs.name }}"
          echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
          echo "Status: ${{ steps.buildx.outputs.status }}"
          echo "Flags: ${{ steps.buildx.outputs.flags }}"
          echo "Platforms: ${{ steps.buildx.outputs.platforms }}"
      - name: Login to quay.io Container Registry
        uses: docker/login-action@v1
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_IO_USERNAME }}
          password: ${{ secrets.QUAY_IO_TOKEN }}
      - name: Login to docker.io Container Registry
        uses: docker/login-action@v1
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_IO_USERNAME }}
          password: ${{ secrets.DOCKER_IO_TOKEN }}
      - name: Build and push
        id: build-release
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          # fix: removed the dangling empty `build-args:` key — it parsed as
          # null and passed no build arguments; dead configuration.
      - name: Image digest
        run: echo ${{ steps.build-release.outputs.digest }}

42
.github/workflows/e2e.yaml vendored Normal file
View File

@@ -0,0 +1,42 @@
# e2e: spins up a KinD cluster and runs the Kamaji e2e suite on every
# push/PR that touches code, the Dockerfile, or this workflow.
name: e2e
on:
  push:
    branches: [ "*" ]
    paths:
      # fix: the filter referenced '.github/workflows/e2e.yml', but this file
      # is named 'e2e.yaml' — changes to the workflow itself never triggered it.
      - '.github/workflows/e2e.yaml'
      - 'api/**'
      - 'controllers/**'
      - 'e2e/*'
      - 'Dockerfile'
      - 'go.*'
      - 'main.go'
      - 'Makefile'
  pull_request:
    branches: [ "*" ]
    paths:
      - '.github/workflows/e2e.yaml'
      - 'api/**'
      - 'controllers/**'
      - 'e2e/*'
      - 'Dockerfile'
      - 'go.*'
      - 'main.go'
      - 'Makefile'
jobs:
  kind:
    name: Kubernetes
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v2
        with:
          go-version: '1.18'
      # cfssl/cfssljson are required by the KinD setup scripts.
      - run: |
          sudo apt-get update
          sudo apt-get install -y golang-cfssl
      - name: e2e testing
        run: make e2e

View File

@@ -1,5 +1,7 @@
# Build the manager binary
FROM golang:1.17 as builder
FROM golang:1.18 as builder
ARG TARGETARCH
WORKDIR /workspace
# Copy the Go Modules manifests
@@ -16,7 +18,7 @@ COPY controllers/ controllers/
COPY internal/ internal/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details

102
Makefile
View File

@@ -36,11 +36,9 @@ IMAGE_TAG_BASE ?= clastix.io/operator
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
IMG ?= clastix/kamaji:latest
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.21
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -50,7 +48,6 @@ GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
@@ -73,6 +70,29 @@ all: build
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Binary
.PHONY: helm
HELM = $(shell pwd)/bin/helm
helm: ## Download helm locally if necessary.
$(call go-install-tool,$(HELM),helm.sh/helm/v3/cmd/helm@v3.9.0)
GINKGO = $(shell pwd)/bin/ginkgo
ginkgo: ## Download ginkgo locally if necessary.
$(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/ginkgo@v1.16.5)
KIND = $(shell pwd)/bin/kind
kind: ## Download kind locally if necessary.
$(call go-install-tool,$(KIND),sigs.k8s.io/kind/cmd/kind@v0.14.0)
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.1)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call install-kustomize,$(KUSTOMIZE),3.8.7)
##@ Development
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
@@ -81,14 +101,8 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
fmt: ## Run go fmt against code.
go fmt ./...
vet: ## Run go vet against code.
go vet ./...
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
test:
go test ./... -coverprofile cover.out
##@ Build
@@ -98,7 +112,7 @@ build: generate fmt vet ## Build manager binary.
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
docker-build: test ## Build docker image with the manager.
docker-build: ## Build docker image with the manager.
docker build -t ${IMG} .
docker-push: ## Push docker image with the manager.
@@ -109,8 +123,8 @@ docker-push: ## Push docker image with the manager.
dev: generate manifests uninstall install rbac ## Full installation for development purposes
go fmt ./...
load: dev docker-build
kind load docker-image --name kamaji ${IMG}
load: docker-build kind
$(KIND) load docker-image --name kamaji ${IMG}
rbac: manifests kustomize ## Install RBAC into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/rbac | kubectl apply -f -
@@ -131,33 +145,6 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
yaml-installation-file: manifests kustomize ## Create yaml installation file
$(KUSTOMIZE) build config/default > config/install.yaml
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.1)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
ENVTEST = $(shell pwd)/bin/setup-envtest
envtest: ## Download envtest-setup locally if necessary.
$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
operator-sdk generate kustomize manifests -q
@@ -213,3 +200,34 @@ catalog-build: opm ## Build a catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)
define install-kustomize
@[ -f $(1) ] || { \
set -e ;\
echo "Installing v$(2)" ;\
cd bin ;\
wget "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" ;\
bash ./install_kustomize.sh $(2) ;\
}
endef
# go-install-tool will 'go install' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-install-tool
@[ -f $(1) ] || { \
set -e ;\
echo "Installing $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
}
endef
.PHONY: env
env:
@make -C deploy/kind kamaji
##@ e2e
.PHONY: e2e
e2e: env load helm ginkgo ## Create a KinD cluster, install Kamaji on it and run the test suite.
$(HELM) upgrade --debug --install kamaji ./helm/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
$(GINKGO) -v ./e2e

View File

@@ -43,19 +43,25 @@ A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a ma
With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that we have to operate an external `etcd` cluster and manage the access to be sure that each tenant cluster uses only its data. Also, there are limits in size in `etcd`, defaulted to 2GB and configurable to a maximum of 8GB. We're solving this issue by pooling multiple `etcd` and sharding the tenant control planes.
## Use cases
Kamaji project has been initially started as a solution for actual and common problems such as minimizing the Total Cost of Ownership while running Kubernetes at scale. However, it can open a wider range of use cases. Here are a few:
Kamaji project has been initially started as a solution for actual and common problems such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it can open a wider range of use cases. Here are a few:
### Managed Kubernetes
Enabling companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize clusters management, monitoring, and observability by leaving developers to focus on the applications, increase productivity and reduce operational costs.
### Kubernetes as a Service
Provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures and cost centers with the option of Bring Your Own Device - BYOD.
### Control Plane as a Service
Provide Kubernetes control plane in a self-service fashion by running management and workloads on different infrastructures and cost centers with the option of Bring Your Own Device - BYOD.
Provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespaces based isolation often still need access to cluster wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
### Edge Computing
Distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
### Cluster Simulations
Test a new Kubernetes API or experimental flag or a new tool without impacting production operations. Kamaji will let you simulate such things in a safe and controlled environment.
### Cluster Simulation
Check new Kubernetes API or experimental flag or a new tool without impacting production operations. Kamaji will let you simulate such things in a safe and controlled environment.
### Workloads Testing
Check the behaviour of your workloads on different and multiple versions of Kubernetes with ease by deploying multiple Control Planes in a single cluster.
## Features
@@ -88,7 +94,6 @@ Tenant clusters are fully CNCF compliant built with upstream Kubernetes binaries
- [ ] `kine` integration, i.e. use MySQL, SQLite, PostgreSQL as datastore
- [ ] Deeper `kubeadm` integration
- [ ] `etcd` pooling
- [ ] Tenant Control Planes sharding
## Documentation
Please, check the project's [documentation](./docs/) for getting started with Kamaji.
@@ -96,6 +101,9 @@ Please, check the project's [documentation](./docs/) for getting started with Ka
## Contributions
Kamaji is Open Source with Apache 2 license and any contribution is welcome.
## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQ
Q. What does Kamaji mean?
@@ -119,4 +127,4 @@ A. Lighter Multi-Tenancy solutions, like Capsule shares the Kubernetes control p
Q. So I need a costly cloud infrastructure to try Kamaji?
A. No, it is possible to try Kamaji on your laptop with [KinD](./deploy/kind/README.md).
A. No, it is possible to get started with Kamaji on your laptop with [KinD](./docs/getting-started-with-kamaji.md).

6
api/interfaces.go Normal file
View File

@@ -0,0 +1,6 @@
package api

// KubeadmConfigResourceVersionDependant is implemented by status types that
// record the resourceVersion of the kubeadm config they were derived from
// (e.g. KubeadmPhaseStatus, AddonStatus) — presumably so reconcilers can
// detect a changed config and refresh stale state; TODO confirm against the
// reconciler code.
// NOTE(review): the "Dependant" spelling is part of the exported name and
// cannot be changed without breaking implementers.
type KubeadmConfigResourceVersionDependant interface {
	// GetKubeadmConfigResourceVersion returns the last recorded
	// kubeadm config resourceVersion.
	GetKubeadmConfigResourceVersion() string
	// SetKubeadmConfigResourceVersion stores the observed kubeadm
	// config resourceVersion.
	SetKubeadmConfigResourceVersion(string)
}

View File

@@ -5,12 +5,13 @@ package v1alpha1
import (
"context"
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
kamajierrors "github.com/clastix/kamaji/internal/errors"
)
func (in *TenantControlPlane) GetAddress(ctx context.Context, client client.Client) (string, error) {
@@ -29,7 +30,7 @@ func (in *TenantControlPlane) GetAddress(ctx context.Context, client client.Clie
case svc.Spec.Type == corev1.ServiceTypeLoadBalancer:
loadBalancerStatus = svc.Status.LoadBalancer
if len(loadBalancerStatus.Ingress) == 0 {
return "", fmt.Errorf("cannot retrieve the TenantControlPlane address, Service resource is not yet exposed as LoadBalancer")
return "", kamajierrors.NonExposedLoadBalancerError{}
}
for _, lb := range loadBalancerStatus.Ingress {
@@ -39,5 +40,5 @@ func (in *TenantControlPlane) GetAddress(ctx context.Context, client client.Clie
}
}
return "", fmt.Errorf("the actual resource doesn't have yet a valid IP address")
return "", kamajierrors.MissingValidIPError{}
}

View File

@@ -90,6 +90,20 @@ type ServiceSpec struct {
ServiceType ServiceType `json:"serviceType"`
}
// AddonSpec defines the spec for every addon.
type AddonSpec struct {
// +kubebuilder:default=true
Enabled *bool `json:"enabled,omitempty"`
}
// AddonsSpec defines the enabled addons and their features.
type AddonsSpec struct {
// +kubebuilder:default={enabled: true}
CoreDNS AddonSpec `json:"coreDNS,omitempty"`
// +kubebuilder:default={enabled: true}
KubeProxy AddonSpec `json:"kubeProxy,omitempty"`
}
// TenantControlPlaneSpec defines the desired state of TenantControlPlane.
type TenantControlPlaneSpec struct {
ControlPlane ControlPlane `json:"controlPlane"`
@@ -99,6 +113,10 @@ type TenantControlPlaneSpec struct {
// NetworkProfile specifies how the network is
NetworkProfile NetworkProfileSpec `json:"networkProfile,omitempty"`
// Addons contain which addons are enabled
// +kubebuilder:default={coreDNS: {enabled: true}, kubeProxy: {enabled: true}}
Addons AddonsSpec `json:"addons,omitempty"`
}
// ETCDAPIServerCertificate defines the observed state of ETCD Certificate for API server.
@@ -179,15 +197,42 @@ type KubeadmPhaseStatus struct {
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
}
func (d KubeadmPhaseStatus) GetKubeadmConfigResourceVersion() string {
return d.KubeadmConfigResourceVersion
}
func (d *KubeadmPhaseStatus) SetKubeadmConfigResourceVersion(rv string) {
d.KubeadmConfigResourceVersion = rv
}
// KubeadmPhasesStatus contains the status of the different kubeadm phases action.
type KubeadmPhasesStatus struct {
UploadConfigKubeadm KubeadmPhaseStatus `json:"uploadConfigKubeadm"`
UploadConfigKubelet KubeadmPhaseStatus `json:"uploadConfigKubelet"`
AddonCoreDNS KubeadmPhaseStatus `json:"addonCoreDNS"`
AddonKubeProxy KubeadmPhaseStatus `json:"addonKubeProxy"`
BootstrapToken KubeadmPhaseStatus `json:"bootstrapToken"`
}
// AddonStatus defines the observed state of an Addon.
type AddonStatus struct {
Enabled bool `json:"enabled"`
KubeadmConfigResourceVersion string `json:"kubeadmConfigResourceVersion,omitempty"`
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
}
func (d AddonStatus) GetKubeadmConfigResourceVersion() string {
return d.KubeadmConfigResourceVersion
}
func (d *AddonStatus) SetKubeadmConfigResourceVersion(rv string) {
d.KubeadmConfigResourceVersion = rv
}
// AddonsStatus defines the observed state of the different Addons.
type AddonsStatus struct {
CoreDNS AddonStatus `json:"coreDNS,omitempty"`
KubeProxy AddonStatus `json:"kubeProxy,omitempty"`
}
// TenantControlPlaneStatus defines the observed state of TenantControlPlane.
type TenantControlPlaneStatus struct {
// Storage Status contains information about Kubernetes storage system
@@ -205,6 +250,8 @@ type TenantControlPlaneStatus struct {
KubeadmPhase KubeadmPhasesStatus `json:"kubeadmPhase,omitempty"`
// ControlPlaneEndpoint contains the status of the kubernetes control plane
ControlPlaneEndpoint string `json:"controlPlaneEndpoint,omitempty"`
// Addons contains the status of the different Addons
Addons AddonsStatus `json:"addons,omitempty"`
}
// KubernetesStatus defines the status of the resources deployed in the management cluster,

View File

@@ -57,6 +57,76 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonSpec.
func (in *AddonSpec) DeepCopy() *AddonSpec {
if in == nil {
return nil
}
out := new(AddonSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonStatus) DeepCopyInto(out *AddonStatus) {
*out = *in
in.LastUpdate.DeepCopyInto(&out.LastUpdate)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonStatus.
func (in *AddonStatus) DeepCopy() *AddonStatus {
if in == nil {
return nil
}
out := new(AddonStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonsSpec) DeepCopyInto(out *AddonsSpec) {
*out = *in
in.CoreDNS.DeepCopyInto(&out.CoreDNS)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonsSpec.
func (in *AddonsSpec) DeepCopy() *AddonsSpec {
if in == nil {
return nil
}
out := new(AddonsSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonsStatus) DeepCopyInto(out *AddonsStatus) {
*out = *in
in.CoreDNS.DeepCopyInto(&out.CoreDNS)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonsStatus.
func (in *AddonsStatus) DeepCopy() *AddonsStatus {
if in == nil {
return nil
}
out := new(AddonsStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in AdmissionControllers) DeepCopyInto(out *AdmissionControllers) {
{
@@ -255,8 +325,6 @@ func (in *KubeadmPhasesStatus) DeepCopyInto(out *KubeadmPhasesStatus) {
*out = *in
in.UploadConfigKubeadm.DeepCopyInto(&out.UploadConfigKubeadm)
in.UploadConfigKubelet.DeepCopyInto(&out.UploadConfigKubelet)
in.AddonCoreDNS.DeepCopyInto(&out.AddonCoreDNS)
in.AddonKubeProxy.DeepCopyInto(&out.AddonKubeProxy)
in.BootstrapToken.DeepCopyInto(&out.BootstrapToken)
}
@@ -564,6 +632,7 @@ func (in *TenantControlPlaneSpec) DeepCopyInto(out *TenantControlPlaneSpec) {
in.ControlPlane.DeepCopyInto(&out.ControlPlane)
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
in.NetworkProfile.DeepCopyInto(&out.NetworkProfile)
in.Addons.DeepCopyInto(&out.Addons)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantControlPlaneSpec.
@@ -585,6 +654,7 @@ func (in *TenantControlPlaneStatus) DeepCopyInto(out *TenantControlPlaneStatus)
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
in.KubeadmConfig.DeepCopyInto(&out.KubeadmConfig)
in.KubeadmPhase.DeepCopyInto(&out.KubeadmPhase)
in.Addons.DeepCopyInto(&out.Addons)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantControlPlaneStatus.

View File

@@ -60,6 +60,33 @@ spec:
spec:
description: TenantControlPlaneSpec defines the desired state of TenantControlPlane.
properties:
addons:
default:
coreDNS:
enabled: true
kubeProxy:
enabled: true
description: Addons contain which addons are enabled
properties:
coreDNS:
default:
enabled: true
description: AddonSpec defines the spec for every addon.
properties:
enabled:
default: true
type: boolean
type: object
kubeProxy:
default:
enabled: true
description: AddonSpec defines the spec for every addon.
properties:
enabled:
default: true
type: boolean
type: object
type: object
controlPlane:
description: ControlPlane defines how the Tenant Control Plane Kubernetes
resources must be created in the Admin Cluster, such as the number
@@ -278,6 +305,36 @@ spec:
status:
description: TenantControlPlaneStatus defines the observed state of TenantControlPlane.
properties:
addons:
description: Addons contains the status of the different Addons
properties:
coreDNS:
description: AddonStatus defines the observed state of an Addon.
properties:
enabled:
type: boolean
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
required:
- enabled
type: object
kubeProxy:
description: AddonStatus defines the observed state of an Addon.
properties:
enabled:
type: boolean
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
required:
- enabled
type: object
type: object
certificates:
description: Certificates contains information about the different
certificates that are necessary to run a kubernetes control plane
@@ -370,26 +427,6 @@ spec:
description: KubeadmPhase contains the status of the kubeadm phases
action
properties:
addonCoreDNS:
description: KubeadmPhasesStatus contains the status of of a kubeadm
phase action.
properties:
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
type: object
addonKubeProxy:
description: KubeadmPhasesStatus contains the status of of a kubeadm
phase action.
properties:
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
type: object
bootstrapToken:
description: KubeadmPhasesStatus contains the status of a kubeadm
phase action.
@@ -421,8 +458,6 @@ spec:
type: string
type: object
required:
- addonCoreDNS
- addonKubeProxy
- bootstrapToken
- uploadConfigKubeadm
- uploadConfigKubelet

View File

@@ -60,6 +60,33 @@ spec:
spec:
description: TenantControlPlaneSpec defines the desired state of TenantControlPlane.
properties:
addons:
default:
coreDNS:
enabled: true
kubeProxy:
enabled: true
description: Addons contain which addons are enabled
properties:
coreDNS:
default:
enabled: true
description: AddonSpec defines the spec for every addon.
properties:
enabled:
default: true
type: boolean
type: object
kubeProxy:
default:
enabled: true
description: AddonSpec defines the spec for every addon.
properties:
enabled:
default: true
type: boolean
type: object
type: object
controlPlane:
description: ControlPlane defines how the Tenant Control Plane Kubernetes resources must be created in the Admin Cluster, such as the number of Pod replicas, the Service resource, or the Ingress.
properties:
@@ -257,6 +284,36 @@ spec:
status:
description: TenantControlPlaneStatus defines the observed state of TenantControlPlane.
properties:
addons:
description: Addons contains the status of the different Addons
properties:
coreDNS:
description: AddonStatus defines the observed state of an Addon.
properties:
enabled:
type: boolean
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
required:
- enabled
type: object
kubeProxy:
description: AddonStatus defines the observed state of an Addon.
properties:
enabled:
type: boolean
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
required:
- enabled
type: object
type: object
certificates:
description: Certificates contains information about the different certificates that are necessary to run a kubernetes control plane
properties:
@@ -343,24 +400,6 @@ spec:
kubeadmPhase:
description: KubeadmPhase contains the status of the kubeadm phases action
properties:
addonCoreDNS:
description: KubeadmPhasesStatus contains the status of of a kubeadm phase action.
properties:
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
type: object
addonKubeProxy:
description: KubeadmPhasesStatus contains the status of of a kubeadm phase action.
properties:
kubeadmConfigResourceVersion:
type: string
lastUpdate:
format: date-time
type: string
type: object
bootstrapToken:
description: KubeadmPhasesStatus contains the status of a kubeadm phase action.
properties:
@@ -389,8 +428,6 @@ spec:
type: string
type: object
required:
- addonCoreDNS
- addonKubeProxy
- bootstrapToken
- uploadConfigKubeadm
- uploadConfigKubelet
@@ -1021,7 +1058,7 @@ spec:
- --leader-elect
command:
- /manager
image: quay.io/clastix/kamaji:latest
image: clastix/kamaji:latest
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -12,5 +12,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: quay.io/clastix/kamaji
newName: clastix/kamaji
newTag: latest

View File

@@ -21,7 +21,7 @@ spec:
labels:
tenant.clastix.io: test
kind.clastix.io: service
serviceType: ClusterIP
serviceType: LoadBalancer
ingress:
enabled: true
hostname: kamaji.local
@@ -46,3 +46,8 @@ spec:
podCidr: "10.244.0.0/16"
dnsServiceIPs:
- "10.96.0.10"
addons:
coreDNS:
enabled: true
kubeProxy:
enabled: true

View File

@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
kamajierrors "github.com/clastix/kamaji/internal/errors"
"github.com/clastix/kamaji/internal/resources"
)
@@ -114,6 +115,10 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
}
registeredResources := []resources.Resource{
&resources.KubernetesUpgrade{
Name: "upgrade",
Client: r.Client,
},
&resources.KubernetesServiceResource{
Client: r.Client,
},
@@ -212,10 +217,6 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
ETCDCACertsSecret: getNamespacedName(r.Config.ETCDCASecretNamespace, r.Config.ETCDCASecretName),
Endpoints: getArrayFromString(r.Config.ETCDEndpoints),
},
&resources.KubernetesUpgrade{
Name: "upgrade",
Client: r.Client,
},
&resources.KubernetesDeploymentResource{
Client: r.Client,
ETCDEndpoints: getArrayFromString(r.Config.ETCDEndpoints),
@@ -224,41 +225,47 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
&resources.KubernetesIngressResource{
Client: r.Client,
},
&resources.KubeadmPhaseResource{
Name: "upload-config-kubeadm",
Client: r.Client,
Log: log,
KubeadmPhase: resources.PhaseUploadConfigKubeadm,
&resources.KubeadmPhase{
Name: "upload-config-kubeadm",
Client: r.Client,
Log: log,
Phase: resources.PhaseUploadConfigKubeadm,
},
&resources.KubeadmPhaseResource{
Name: "upload-config-kubelet",
Client: r.Client,
Log: log,
KubeadmPhase: resources.PhaseUploadConfigKubelet,
&resources.KubeadmPhase{
Name: "upload-config-kubelet",
Client: r.Client,
Log: log,
Phase: resources.PhaseUploadConfigKubelet,
},
&resources.KubeadmPhaseResource{
Name: "addon-coredns",
Client: r.Client,
Log: log,
KubeadmPhase: resources.PhaseAddonCoreDNS,
&resources.KubeadmPhase{
Name: "bootstrap-token",
Client: r.Client,
Log: log,
Phase: resources.PhaseBootstrapToken,
},
&resources.KubeadmPhaseResource{
Name: "addon-kubeproxy",
&resources.KubeadmAddonResource{
Name: "coredns",
Client: r.Client,
Log: log,
KubeadmPhase: resources.PhaseAddonKubeProxy,
KubeadmAddon: resources.AddonCoreDNS,
},
&resources.KubeadmPhaseResource{
Name: "bootstrap-token",
&resources.KubeadmAddonResource{
Name: "kubeproxy",
Client: r.Client,
Log: log,
KubeadmPhase: resources.PhaseBootstrapToken,
KubeadmAddon: resources.AddonKubeProxy,
},
}
for _, resource := range registeredResources {
result, err := resources.Handle(ctx, resource, tenantControlPlane)
if err != nil {
if kamajierrors.ShouldReconcileErrorBeIgnored(err) {
log.V(1).Info("sentinel error, enqueuing back request", "error", err.Error())
return ctrl.Result{Requeue: true}, nil
}
return ctrl.Result{}, err
}

View File

@@ -27,10 +27,10 @@ ingress-nginx-install:
kubectl apply -f $(kind_path)/nginx-deploy.yaml
kamaji-kind-worker-build:
docker build -f $(kind_path)/kamaji-kind-worker.dockerfile -t quay.io/clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest} .
docker build -f $(kind_path)/kamaji-kind-worker.dockerfile -t clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest} .
kamaji-kind-worker-push: kamaji-kind-worker-build
docker push quay.io/clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest}
docker push clastix/kamaji-kind-worker:$${WORKER_VERSION:-latest}
kamaji-kind-worker-join:
$(kind_path)/join-node.bash

View File

@@ -1,138 +0,0 @@
# Setup a minimal Kamaji for development
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development purposes. Please refer to the [Kamaji documentation](../../README.md) to understand all the terms used in this guide, such as `admin cluster` and `tenant control plane`.
## Tools
We assume you have installed on your workstation:
- [Docker](https://docs.docker.com/engine/install/)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl](https://github.com/cloudflare/cfssl)
- [cfssljson](https://github.com/cloudflare/cfssl)
## Setup Kamaji on KinD
The instance of Kamaji is made of a single node hosting:
- admin control-plane
- admin worker
- multi-tenant etcd cluster
The multi-tenant etcd cluster is deployed as statefulset into the Kamaji node.
Run `make kamaji` to setup Kamaji on KinD.
```bash
cd ./deploy/kind
make kamaji
```
At this moment you will have your KinD up and running and ETCD cluster in multitenant mode.
### Install Kamaji
```bash
$ kubectl apply -f ../../config/install.yaml
```
### Deploy Tenant Control Plane
Now it is the moment of deploying your first tenant control plane.
```bash
$ kubectl apply -f - <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: tenant1
spec:
controlPlane:
deployment:
replicas: 2
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: deployment
service:
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: service
serviceType: NodePort
ingress:
enabled: false
kubernetes:
version: "v1.23.4"
kubelet:
cgroupfs: cgroupfs
admissionControllers:
- LimitRanger
- ResourceQuota
networkProfile:
address: "172.18.0.2"
port: 31443
domain: "clastix.labs"
serviceCidr: "10.96.0.0/16"
podCidr: "10.244.0.0/16"
dnsServiceIPs:
- "10.96.0.10"
EOF
```
> Check networkProfile fields according to your installation
> To make Kamaji work in KinD, you have to indicate that the service must be of type [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
### Get Kubeconfig
Let's retrieve kubeconfig and store in `/tmp/kubeconfig`
```bash
$ kubectl get secrets tenant1-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d > /tmp/kubeconfig
```
You can export it to facilitate the next tasks:
```bash
$ export KUBECONFIG=/tmp/kubeconfig
```
### Install CNI
We highly recommend to install [kindnet](https://github.com/aojea/kindnet) as CNI for your kamaji TCP.
```bash
$ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/install-kindnet.yaml
```
### Join worker nodes
```bash
$ make kamaji-kind-worker-join
```
> To add more worker nodes, run again the command above.
Check out the node:
```bash
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
d2d4b468c9de Ready <none> 44s v1.23.4
```
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`
Tenant control plane provision has been finished in a minimal Kamaji setup based on KinD. Therefore, you could develop, test and make your own experiments with Kamaji.

View File

@@ -3,7 +3,7 @@
set -e
# Constants
export DOCKER_IMAGE_NAME="quay.io/clastix/kamaji-kind-worker"
export DOCKER_IMAGE_NAME="clastix/kamaji-kind-worker"
export DOCKER_NETWORK="kind"
# Variables

View File

@@ -10,6 +10,10 @@ nodes:
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
## required for Cluster API local development
extraMounts:
- hostPath: /var/run/docker.sock
containerPath: /var/run/docker.sock
extraPortMappings:
## expose port 80 of the node to port 80 on the host
- containerPort: 80
@@ -27,4 +31,3 @@ nodes:
- containerPort: 6443
hostPort: 8443
protocol: TCP

View File

@@ -1,604 +1,143 @@
# Setup a Kamaji environment
This getting started guide will lead you through the process of creating a basic working Kamaji setup.
# Setup a minimal Kamaji for development
Kamaji requires:
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development purposes. Please refer to the [Kamaji documentation](../README.md) to understand all the terms used in this guide, such as `admin cluster` and `tenant control plane`.
- (optional) a bootstrap node;
- a multi-tenant `etcd` cluster made of 3 nodes hosting the datastore for the `Tenant`s' clusters
- a Kubernetes cluster, running the admin and Tenant Control Planes
- an arbitrary number of machines hosting `Tenant`s' workloads
## Tools
> In this guide, we assume all machines are running `Ubuntu 20.04`.
We assume you have installed on your workstation:
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Setup external multi-tenant etcd](#setup-external-multi-tenant-etcd)
* [Setup internal multi-tenant etcd](#setup-internal-multi-tenant-etcd)
* [Install Kamaji controller](#install-kamaji-controller)
* [Setup Tenant cluster](#setup-tenant-cluster)
- [Docker](https://docs.docker.com/engine/install/)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl](https://github.com/cloudflare/cfssl)
- [cfssljson](https://github.com/cloudflare/cfssl)
## Prepare the bootstrap workspace
This getting started guide is supposed to be run from a remote or local bootstrap machine.
First, prepare the workspace directory:
## Setup Kamaji on KinD
```
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
The instance of Kamaji is made of a single node hosting:
Throughout the instructions, shell variables are used to indicate values that you should adjust to your own environment.
- admin control-plane
- admin worker
- multi-tenant etcd cluster
### Install required tools
On the bootstrap machine, install all the required tools to work with a Kamaji setup.
The multi-tenant etcd cluster is deployed as statefulset into the Kamaji node.
#### cfssl and cfssljson
The `cfssl` and `cfssljson` command line utilities will be used in addition to `kubeadm` to provision the PKI Infrastructure and generate TLS certificates.
```
wget -q --show-progress --https-only --timestamping \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson
chmod +x cfssl cfssljson
sudo mv cfssl cfssljson /usr/local/bin/
```
#### Kubernetes tools
Install `kubeadm` and `kubectl`
Run `make kamaji` to setup Kamaji on KinD.
```bash
sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && \
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list && \
sudo apt update && sudo apt install -y kubeadm kubectl --allow-change-held-packages && \
sudo apt-mark hold kubeadm kubectl
cd ./deploy/kind
make kamaji
```
#### etcdctl
For administration of the `etcd` cluster, download and install the `etcdctl` CLI utility on the bootstrap machine
At this moment you will have your KinD up and running and ETCD cluster in multitenant mode.
### Install Kamaji
```bash
ETCD_VER=v3.5.1
ETCD_URL=https://storage.googleapis.com/etcd
curl -L ${ETCD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o etcd-${ETCD_VER}-linux-amd64.tar.gz
tar xzvf etcd-${ETCD_VER}-linux-amd64.tar.gz etcd-${ETCD_VER}-linux-amd64/etcdctl
sudo cp etcd-${ETCD_VER}-linux-amd64/etcdctl /usr/bin/etcdctl
rm -rf etcd-${ETCD_VER}-linux-amd64*
$ kubectl apply -f ../../config/install.yaml
```
Verify `etcdctl` version is installed
### Deploy Tenant Control Plane
Now it is the moment of deploying your first tenant control plane.
```bash
etcdctl version
etcdctl version: 3.5.1
API version: 3.5
```
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes running as pods. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters.
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. Currently we tested:
- [Kubernetes installed with `kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
- [Azure AKS managed service](./kamaji-on-azure.md).
- [KinD for local development](./kind/README.md).
The admin cluster should provide:
- CNI module installed, eg. Calico
- Support for LoadBalancer Service Type, eg. MetalLB
- Ingress Controller
- CSI module installed with StorageClass for multi-tenant `etcd`
- Monitoring Stack, eg. Prometheus and Grafana
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into a Kamaji Admin Cluster.
## Setup external multi-tenant etcd
In this section, we're going to setup a multi-tenant `etcd` cluster on dedicated nodes. Alternatively, if you want to use an internal `etcd` cluster as Kubernetes StatefulSet, jump [here](#setup-internal-multi-tenant-etcd).
### Ensure host access
From the bootstrap machine load the environment for external `etcd` setup:
```bash
source kamaji-external-etcd.env
```
The installer requires a user that has access to all hosts. In order to run the installer as a non-root user, first configure passwordless sudo rights on each host:
Generate an SSH key on the host you run the installer on:
```bash
ssh-keygen -t rsa
```
> Do not use a password.
Distribute the key to the other cluster hosts.
Depending on your environment, use a bash loop:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh-copy-id -i ~/.ssh/id_rsa.pub $HOST;
done
```
> Alternatively, inject the generated public key into machines metadata.
Confirm that you can access each host from bootstrap machine:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'hostname';
done
```
### Configure disk layout
As per `etcd` [requirements](https://etcd.io/docs/v3.5/op-guide/hardware/#disks), back `etcd`s storage with a SSD. A SSD usually provides lower write latencies and with less variance than a spinning disk, thus improving the stability and reliability of `etcd`.
For each `etcd` machine, we assume an additional `sdb` disk of 10GB:
```
clastix@kamaji-etcd-00:~$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16G 0 disk
├─sda1 8:1 0 15.9G 0 part /
├─sda14 8:14 0 4M 0 part
└─sda15 8:15 0 106M 0 part /boot/efi
sdb 8:16 0 10G 0 disk
sr0 11:0 1 4M 0 rom
```
Create partition, format, and mount the `etcd` disk, by running the script below from the bootstrap machine:
> If you already used the `etcd` disks, please make sure to wipe the partitions with `sudo wipefs --all --force /dev/sdb` before to attempt to recreate them.
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
ssh ${USER}@${HOST} -t 'echo type=83 | sudo sfdisk -f -q /dev/sdb'
ssh ${USER}@${HOST} -t 'sudo mkfs -F -q -t ext4 /dev/sdb1'
ssh ${USER}@${HOST} -t 'sudo mkdir -p /var/lib/etcd'
ssh ${USER}@${HOST} -t 'sudo e2label /dev/sdb1 ETCD'
ssh ${USER}@${HOST} -t 'echo LABEL=ETCD /var/lib/etcd ext4 defaults 0 1 | sudo tee -a /etc/fstab'
ssh ${USER}@${HOST} -t 'sudo mount -a'
ssh ${USER}@${HOST} -t 'sudo lsblk -f'
done
```
### Install prerequisites
Use bash script `nodes-prerequisites.sh` to install all the dependencies on all the cluster nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version, eg. `v1.24.0`
Run the installation script:
```bash
VERSION=v1.24.0
./nodes-prerequisites.sh ${VERSION:1} ${HOSTS[@]}
```
### Configure kubelet
On each `etcd` node, configure the `kubelet` service to start `etcd` static pods using `containerd` as container runtime, by running the script below from the bootstrap machine:
```bash
cat << EOF > 20-etcd-service-manager.conf
[Service]
ExecStart=
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=/run/containerd/containerd.sock
Restart=always
EOF
```
```
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
scp 20-etcd-service-manager.conf ${USER}@${HOST}:
ssh ${USER}@${HOST} -t 'sudo chown -R root:root 20-etcd-service-manager.conf && sudo mv 20-etcd-service-manager.conf /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf'
ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
ssh ${USER}@${HOST} -t 'sudo systemctl start kubelet'
ssh ${USER}@${HOST} -t 'sudo systemctl enable kubelet'
done
rm -f 20-etcd-service-manager.conf
```
### Create configuration
Create temp directories to store files that will end up on `etcd` hosts:
```bash
mkdir -p /tmp/${ETCD0}/ /tmp/${ETCD1}/ /tmp/${ETCD2}/
NAMES=("etcd00" "etcd01" "etcd02")
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat <<EOF | sudo tee /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
local:
serverCertSANs:
- "${HOST}"
peerCertSANs:
- "${HOST}"
extraArgs:
initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
initial-cluster-state: new
name: ${NAME}
listen-peer-urls: https://${HOST}:2380
listen-client-urls: https://${HOST}:2379
advertise-client-urls: https://${HOST}:2379
initial-advertise-peer-urls: https://${HOST}:2380
auto-compaction-mode: periodic
auto-compaction-retention: 5m
quota-backend-bytes: '8589934592'
EOF
done
```
> Note:
>
> ##### Etcd compaction
>
> By enabling `etcd` authentication, it prevents the tenant apiservers (clients of `etcd`) from issuing compaction requests. We set `etcd` to automatically compact the keyspace with the `--auto-compaction-*` options with a period of hours or minutes. When `--auto-compaction-mode=periodic`, `--auto-compaction-retention=5m`, and writes per minute are about 1000, `etcd` compacts revision 5000 every 5 minutes.
>
> ##### Etcd storage quota
>
> Currently, `etcd` is limited in storage size, defaulted to `2GB` and configurable with `--quota-backend-bytes` flag up to `8GB`. In Kamaji, we use a single `etcd` to store multiple tenant clusters, so we need to increase this size. Please, note `etcd` warns at startup if the configured value exceeds `8GB`.
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create and distribute `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
For each `etcd` host:
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
sudo kubeadm init phase certs etcd-server --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo kubeadm init phase certs etcd-peer --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo cp -R /etc/kubernetes/pki /tmp/${HOST}/
sudo find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
done
```
### Startup the cluster
Upload certificates on each `etcd` node and restart the `kubelet`
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
sudo chown -R ${USER}. /tmp/${HOST}
scp -r /tmp/${HOST}/* ${USER}@${HOST}:
ssh ${USER}@${HOST} -t 'sudo chown -R root:root pki'
ssh ${USER}@${HOST} -t 'sudo mv pki /etc/kubernetes/'
ssh ${USER}@${HOST} -t 'sudo kubeadm init phase etcd local --config=kubeadmcfg.yaml'
ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
ssh ${USER}@${HOST} -t 'sudo systemctl restart kubelet'
done
```
This will start the static `etcd` pod on each node and then the cluster gets formed.
Generate certificates for the `root` user
```bash
cat > root-csr.json <<EOF
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
```
```bash
cfssl gencert \
-ca=kamaji/etcd/ca.crt \
-ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=client-authentication \
root-csr.json | cfssljson -bare root
```
```bash
cp root.pem kamaji/etcd/root.crt
cp root-key.pem kamaji/etcd/root.key
rm root*
```
The result should be:
```bash
$ tree kamaji
kamaji
└── etcd
├── ca.crt
├── ca.key
├── root.crt
└── root.key
```
Use the `root` user to check the just formed `etcd` cluster is in health state
```bash
export ETCDCTL_CACERT=kamaji/etcd/ca.crt
export ETCDCTL_CERT=kamaji/etcd/root.crt
export ETCDCTL_KEY=kamaji/etcd/root.key
export ETCDCTL_ENDPOINTS=https://${ETCD0}:2379
etcdctl member list -w table
```
The result should be something like this:
```
+------------------+---------+--------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+----------------------------+----------------------------+------------+
| 72657d6307364226 | started | etcd01 | https://192.168.32.11:2380 | https://192.168.32.11:2379 | false |
| 91eb892c5ee87610 | started | etcd00 | https://192.168.32.10:2380 | https://192.168.32.10:2379 | false |
| e9971c576949c34e | started | etcd02 | https://192.168.32.12:2380 | https://192.168.32.12:2379 | false |
+------------------+---------+--------+----------------------------+----------------------------+------------+
```
### Enable multi-tenancy
The `root` user, which has full access to `etcd`, must be created before enabling authentication. The `root` user must have the `root` role and is allowed to change anything inside `etcd`.
```bash
etcdctl user add --no-password=true root
etcdctl role add root
etcdctl user grant-role root root
etcdctl auth enable
```
### Cleanup
If you want to get rid of the etcd cluster, for each node, login and clean it:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
done
```
## Setup internal multi-tenant etcd
If you opted for an internal etcd cluster running in the Kamaji admin cluster, follow steps below.
From the bootstrap machine load the environment for internal `etcd` setup:
```bash
source kamaji-internal-etcd.env
```
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create the `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
Generate the `etcd` certificates for peers:
```
cat << EOF | tee kamaji/etcd/peer-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-0",
"etcd-0.etcd",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1",
"etcd-1.etcd",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2",
"etcd-2.etcd",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc",
    "etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/peer-csr.json | cfssljson -bare kamaji/etcd/peer
```
Generate the `etcd` certificates for server:
```
cat << EOF | tee kamaji/etcd/server-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-server",
"etcd-server.${ETCD_NAMESPACE}.svc",
"etcd-server.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/server-csr.json | cfssljson -bare kamaji/etcd/server
```
Generate certificates for the `root` user of the `etcd`
```
cat << EOF | tee kamaji/etcd/root-csr.json
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=client-authentication kamaji/etcd/root-csr.json | cfssljson -bare kamaji/etcd/root
```
Install the `etcd` in the Kamaji admin cluster
```bash
kubectl create namespace ${ETCD_NAMESPACE}
kubectl -n ${ETCD_NAMESPACE} create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key \
--from-file=kamaji/etcd/peer-key.pem --from-file=kamaji/etcd/peer.pem \
--from-file=kamaji/etcd/server-key.pem --from-file=kamaji/etcd/server.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-cluster.yaml
```
Install an `etcd` client to interact with the `etcd` server
```bash
kubectl -n ${ETCD_NAMESPACE} create secret tls root-certs \
--key=kamaji/etcd/root-key.pem \
--cert=kamaji/etcd/root.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-client.yaml
```
Wait until the etcd instances discover each other and the cluster is formed:
```bash
kubectl -n ${ETCD_NAMESPACE} wait pod --for=condition=ready -l app=etcd --timeout=120s
echo -n "\nChecking endpoint's health..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl endpoint health 1>/dev/null 2>/dev/null; until [ \$$? -eq 0 ]; do sleep 10; printf "."; etcdctl endpoint health 1>/dev/null 2>/dev/null; done;"
echo -n "\netcd cluster's health:\n"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl endpoint health"
echo -n "\nWaiting for all members..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "until [ \$$(etcdctl member list 2>/dev/null | wc -l) -eq 3 ]; do sleep 10; printf '.'; done;"
echo -n "\netcd's members:\n"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl member list -w table"
```
### Enable multi-tenancy
The `root` user, which has full access to `etcd`, must be created before enabling authentication. The `root` user must have the `root` role and is allowed to change anything inside `etcd`.
```bash
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user add --no-password=true root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl role add root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user grant-role root root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl auth enable
```
## Install Kamaji controller
Currently, the behaviour of the Kamaji controller for Tenant Control Plane is controlled by (in this order):
- CLI flags
- Environment variables
- Configuration file `kamaji.yaml` built into the image
By default, Kamaji searches for the configuration file and uses the parameters found inside it. If environment variables are set, they override the configuration file parameters. Finally, if a CLI flag is also passed, it overrides both the environment variables and the configuration file.
There are multiple ways to deploy the Kamaji controller:
- Use the single YAML file installer
- Use Kustomize with Makefile
- Use the Kamaji Helm Chart
The Kamaji controller needs to access the multi-tenant `etcd` in order to provision the access for tenant `kube-apiserver`.
Create the secrets containing the `etcd` certificates
```bash
kubectl create namespace kamaji-system
kubectl -n kamaji-system create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key
kubectl -n kamaji-system create secret tls root-client-certs \
--cert=kamaji/etcd/root.crt \
--key=kamaji/etcd/root.key
```
### Install with a single manifest
Install with the single YAML file installer:
```bash
kubectl -n kamaji-system apply -f ../config/install.yaml
```
Make sure to patch the `etcd` endpoints of the Kamaji controller, according to your environment:
```bash
cat > patch-deploy.yaml <<EOF
$ kubectl apply -f - <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: tenant1
spec:
template:
spec:
containers:
- name: manager
args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
- --etcd-endpoints=${ETCD0}:2379,${ETCD1}:2379,${ETCD2}:2379
controlPlane:
deployment:
replicas: 2
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: deployment
service:
additionalMetadata:
annotations:
environment.clastix.io: tenant1
tier.clastix.io: "0"
labels:
tenant.clastix.io: tenant1
kind.clastix.io: service
serviceType: NodePort
ingress:
enabled: false
kubernetes:
version: "v1.23.4"
kubelet:
cgroupfs: cgroupfs
admissionControllers:
- LimitRanger
- ResourceQuota
networkProfile:
address: "172.18.0.2"
port: 31443
domain: "clastix.labs"
serviceCidr: "10.96.0.0/16"
podCidr: "10.244.0.0/16"
dnsServiceIPs:
- "10.96.0.10"
addons:
coreDNS:
enabled: true
kubeProxy:
enabled: true
EOF
kubectl -n kamaji-system patch \
deployment kamaji-controller-manager \
--patch-file patch-deploy.yaml
```
The Kamaji Tenant Control Plane controller is now running on the Admin Cluster:
> Check networkProfile fields according to your installation
> To make Kamaji work in KinD, you have to indicate that the service must be of type [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
### Get Kubeconfig
Let's retrieve kubeconfig and store in `/tmp/kubeconfig`
```bash
kubectl -n kamaji-system get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
operator-controller-manager 1/1 1 1 14h
$ kubectl get secrets tenant1-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d > /tmp/kubeconfig
```
You can export it to facilitate the next tasks:
```bash
$ export KUBECONFIG=/tmp/kubeconfig
```
## Setup Tenant Cluster
Now you are getting an Admin Cluster available to run multiple Tenant Control Planes, deployed by the Kamaji controller. Please, refer to the Kamaji Tenant Deployment [guide](./kamaji-tenant-deployment-guide.md).
### Install CNI
We highly recommend to install [kindnet](https://github.com/aojea/kindnet) as CNI for your kamaji TCP.
```bash
$ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/install-kindnet.yaml
```
### Join worker nodes
```bash
$ make kamaji-kind-worker-join
```
> To add more worker nodes, run again the command above.
Check out the node:
```bash
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
d2d4b468c9de Ready <none> 44s v1.23.4
```
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`
Tenant control plane provision has been finished in a minimal Kamaji setup based on KinD. Therefore, you could develop, test and make your own experiments with Kamaji.

View File

@@ -82,21 +82,10 @@ source kamaji-tenant-azure.env
### On Kamaji side
With Kamaji on AKS, the tenant control plane is accessible:
- from tenant work nodes through an internal loadbalancer as `https://${TENANT_ADDR}:${TENANT_PORT}`
- from tenant work nodes through an internal loadbalancer as `https://${TENANT_ADDR}:6443`
- from tenant admin user through an external loadbalancer `https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com:443`
#### Allocate an internal IP address for the Tenant Control Plane
Currently, Kamaji has a known limitation, meaning the address `${TENANT_ADDR}:${TENANT_PORT}` must be known in advance, before creating the Tenant Control Plane. Given this limitation, let's reserve an IP address and port in the same virtual subnet used by the Kamaji admin cluster:
```bash
export TENANT_ADDR=10.240.0.100
export TENANT_PORT=6443
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
```
> Make sure the `TENANT_ADDR` value does not overlap with already allocated IP addresses in the AKS virtual network. In the future, Kamaji will implement a dynamic IP allocation.
Where `TENANT_ADDR` is the Azure internal IP address assigned to the LoadBalancer service created by Kamaji to expose the Tenant Control Plane endpoint.
#### Create the Tenant Control Plane
@@ -139,13 +128,17 @@ spec:
- ResourceQuota
- LimitRanger
networkProfile:
address: ${TENANT_ADDR}
port: ${TENANT_PORT}
domain: ${TENANT_DOMAIN}
port: 6443
domain: ${KAMAJI_REGION}.cloudapp.azure.com
serviceCidr: ${TENANT_SVC_CIDR}
podCidr: ${TENANT_POD_CIDR}
dnsServiceIPs:
- ${TENANT_DNS_SERVICE}
addons:
coreDNS:
enabled: true
kubeProxy:
enabled: true
---
apiVersion: v1
kind: Service
@@ -195,6 +188,11 @@ NAME READY UP-TO-DATE AVAILABLE AGE
tenant-00 2/2 2 2 47m
```
Collect the internal IP address of Azure loadbalancer where the Tenant control Plane is exposed:
```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."status.loadBalancer.ingress[].ip")
```
#### Working with Tenant Control Plane
Check the access to the Tenant Control Plane:
@@ -225,7 +223,7 @@ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AG
default kubernetes ClusterIP 10.32.0.1 <none> 443/TCP 6m
```
Check out how the Tenant Control Plane advertises itself to workloads:
Check out how the Tenant Control Plane advertises itself:
```
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep
@@ -234,7 +232,7 @@ NAME ENDPOINTS AGE
kubernetes 10.240.0.100:6443 57m
```
Make sure it's `${TENANT_ADDR}:${TENANT_PORT}`.
Make sure it's `${TENANT_ADDR}:6443`.
### Prepare the Infrastructure for the Tenant virtual machines
Kamaji provides Control Plane as a Service, so the tenant user can join his own virtual machines as worker nodes. Each tenant can place his virtual machines in a dedicated Azure virtual network.
@@ -332,7 +330,7 @@ az vmss scale \
The current approach for joining nodes is to use the `kubeadm` one therefore, we will create a bootstrap token to perform the action:
```bash
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:${TENANT_PORT} ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
```
A bash loop will be used to join all the available nodes.
@@ -398,6 +396,16 @@ kamaji-tenant-worker-02 Ready <none> 10m v1.23.4
## Cleanup
To get rid of the Tenant infrastructure, remove the RESOURCE_GROUP: `az group delete --name $TENANT_RG --yes --no-wait`.
To get rid of the Tenant infrastructure, remove the RESOURCE_GROUP:
To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP: `az group delete --name $KAMAJI_RG --yes --no-wait`.
```
az group delete --name $TENANT_RG --yes --no-wait
```
To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP:
```
az group delete --name $KAMAJI_RG --yes --no-wait
```
That's all folks!

View File

@@ -0,0 +1,603 @@
# Install a Kamaji environment
This guide will lead you through the process of creating a basic working Kamaji setup.
Kamaji requires:
- (optional) a bootstrap node;
- a multi-tenant `etcd` cluster made of 3 nodes hosting the datastore for the `Tenant`s' clusters
- a Kubernetes cluster, running the admin and Tenant Control Planes
- an arbitrary number of machines hosting `Tenant`s' workloads
> In this guide, we assume all machines are running `Ubuntu 20.04`.
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Setup external multi-tenant etcd](#setup-external-multi-tenant-etcd)
* [Setup internal multi-tenant etcd](#setup-internal-multi-tenant-etcd)
* [Install Kamaji controller](#install-kamaji-controller)
* [Setup Tenant cluster](#setup-tenant-cluster)
## Prepare the bootstrap workspace
This guide is supposed to be run from a remote or local bootstrap machine.
First, prepare the workspace directory:
```
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```
Throughout the instructions, shell variables are used to indicate values that you should adjust to your own environment.
### Install required tools
On the bootstrap machine, install all the required tools to work with a Kamaji setup.
#### cfssl and cfssljson
The `cfssl` and `cfssljson` command line utilities will be used in addition to `kubeadm` to provision the PKI Infrastructure and generate TLS certificates.
```
wget -q --show-progress --https-only --timestamping \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson
chmod +x cfssl cfssljson
sudo mv cfssl cfssljson /usr/local/bin/
```
#### Kubernetes tools
Install `kubeadm` and `kubectl`
```bash
sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && \
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list && \
sudo apt update && sudo apt install -y kubeadm kubectl --allow-change-held-packages && \
sudo apt-mark hold kubeadm kubectl
```
#### etcdctl
For administration of the `etcd` cluster, download and install the `etcdctl` CLI utility on the bootstrap machine
```bash
ETCD_VER=v3.5.1
ETCD_URL=https://storage.googleapis.com/etcd
curl -L ${ETCD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o etcd-${ETCD_VER}-linux-amd64.tar.gz
tar xzvf etcd-${ETCD_VER}-linux-amd64.tar.gz etcd-${ETCD_VER}-linux-amd64/etcdctl
sudo cp etcd-${ETCD_VER}-linux-amd64/etcdctl /usr/bin/etcdctl
rm -rf etcd-${ETCD_VER}-linux-amd64*
```
Verify `etcdctl` version is installed
```bash
etcdctl version
etcdctl version: 3.5.1
API version: 3.5
```
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes running as pods. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters.
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. Currently we tested:
- [Kubernetes installed with `kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
- [Azure AKS managed service](./kamaji-on-azure.md).
- [KinD for local development](./getting-started-with-kamaji.md ).
The admin cluster should provide:
- CNI module installed, eg. Calico
- Support for LoadBalancer Service Type, eg. MetalLB or, alternatively, an Ingress Controller
- CSI module installed with StorageClass for multi-tenant `etcd`
- Monitoring Stack, eg. Prometheus and Grafana
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster.
## Setup external multi-tenant etcd
In this section, we're going to setup a multi-tenant `etcd` cluster on dedicated nodes. Alternatively, if you want to use an internal `etcd` cluster as Kubernetes StatefulSet, jump [here](#setup-internal-multi-tenant-etcd).
### Ensure host access
From the bootstrap machine load the environment for external `etcd` setup:
```bash
source kamaji-external-etcd.env
```
The installer requires a user that has access to all hosts. In order to run the installer as a non-root user, first configure passwordless sudo rights on each host:
Generate an SSH key on the host you run the installer on:
```bash
ssh-keygen -t rsa
```
> Do not use a password.
Distribute the key to the other cluster hosts.
Depending on your environment, use a bash loop:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh-copy-id -i ~/.ssh/id_rsa.pub $HOST;
done
```
> Alternatively, inject the generated public key into machines metadata.
Confirm that you can access each host from bootstrap machine:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'hostname';
done
```
### Configure disk layout
As per `etcd` [requirements](https://etcd.io/docs/v3.5/op-guide/hardware/#disks), back `etcd`s storage with a SSD. A SSD usually provides lower write latencies and with less variance than a spinning disk, thus improving the stability and reliability of `etcd`.
For each `etcd` machine, we assume an additional `sdb` disk of 10GB:
```
clastix@kamaji-etcd-00:~$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16G 0 disk
├─sda1 8:1 0 15.9G 0 part /
├─sda14 8:14 0 4M 0 part
└─sda15 8:15 0 106M 0 part /boot/efi
sdb 8:16 0 10G 0 disk
sr0 11:0 1 4M 0 rom
```
Create partition, format, and mount the `etcd` disk, by running the script below from the bootstrap machine:
> If you already used the `etcd` disks, please make sure to wipe the partitions with `sudo wipefs --all --force /dev/sdb` before to attempt to recreate them.
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
ssh ${USER}@${HOST} -t 'echo type=83 | sudo sfdisk -f -q /dev/sdb'
ssh ${USER}@${HOST} -t 'sudo mkfs -F -q -t ext4 /dev/sdb1'
ssh ${USER}@${HOST} -t 'sudo mkdir -p /var/lib/etcd'
ssh ${USER}@${HOST} -t 'sudo e2label /dev/sdb1 ETCD'
ssh ${USER}@${HOST} -t 'echo LABEL=ETCD /var/lib/etcd ext4 defaults 0 1 | sudo tee -a /etc/fstab'
ssh ${USER}@${HOST} -t 'sudo mount -a'
ssh ${USER}@${HOST} -t 'sudo lsblk -f'
done
```
### Install prerequisites
Use bash script `nodes-prerequisites.sh` to install all the dependencies on all the cluster nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version, eg. `v1.24.0`
Run the installation script:
```bash
VERSION=v1.24.0
./nodes-prerequisites.sh ${VERSION:1} ${HOSTS[@]}
```
### Configure kubelet
On each `etcd` node, configure the `kubelet` service to start `etcd` static pods using `containerd` as container runtime, by running the script below from the bootstrap machine:
```bash
cat << EOF > 20-etcd-service-manager.conf
[Service]
ExecStart=
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=/run/containerd/containerd.sock
Restart=always
EOF
```
```
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
scp 20-etcd-service-manager.conf ${USER}@${HOST}:
ssh ${USER}@${HOST} -t 'sudo chown -R root:root 20-etcd-service-manager.conf && sudo mv 20-etcd-service-manager.conf /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf'
ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
ssh ${USER}@${HOST} -t 'sudo systemctl start kubelet'
ssh ${USER}@${HOST} -t 'sudo systemctl enable kubelet'
done
rm -f 20-etcd-service-manager.conf
```
### Create configuration
Create temp directories to store files that will end up on `etcd` hosts:
```bash
mkdir -p /tmp/${ETCD0}/ /tmp/${ETCD1}/ /tmp/${ETCD2}/
NAMES=("etcd00" "etcd01" "etcd02")
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat <<EOF | sudo tee /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
local:
serverCertSANs:
- "${HOST}"
peerCertSANs:
- "${HOST}"
extraArgs:
initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
initial-cluster-state: new
name: ${NAME}
listen-peer-urls: https://${HOST}:2380
listen-client-urls: https://${HOST}:2379
advertise-client-urls: https://${HOST}:2379
initial-advertise-peer-urls: https://${HOST}:2380
auto-compaction-mode: periodic
auto-compaction-retention: 5m
quota-backend-bytes: '8589934592'
EOF
done
```
> Note:
>
> ##### Etcd compaction
>
> By enabling `etcd` authentication, it prevents the tenant apiservers (clients of `etcd`) to issue compaction requests. We set `etcd` to automatically compact the keyspace with the `--auto-compaction-*` option with a period of hours or minutes. When `--auto-compaction-mode=periodic` and `--auto-compaction-retention=5m` and writes per minute are about 1000, `etcd` compacts revision 5000 every 5 minutes.
>
> ##### Etcd storage quota
>
> Currently, `etcd` is limited in storage size, defaulted to `2GB` and configurable with `--quota-backend-bytes` flag up to `8GB`. In Kamaji, we use a single `etcd` to store multiple tenant clusters, so we need to increase this size. Please, note `etcd` warns at startup if the configured value exceeds `8GB`.
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create and distribute `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
For each `etcd` host:
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
sudo kubeadm init phase certs etcd-server --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo kubeadm init phase certs etcd-peer --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST}/kubeadmcfg.yaml
sudo cp -R /etc/kubernetes/pki /tmp/${HOST}/
sudo find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
done
```
### Startup the cluster
Upload certificates on each `etcd` node and restart the `kubelet`
```bash
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
sudo chown -R ${USER}. /tmp/${HOST}
scp -r /tmp/${HOST}/* ${USER}@${HOST}:
ssh ${USER}@${HOST} -t 'sudo chown -R root:root pki'
ssh ${USER}@${HOST} -t 'sudo mv pki /etc/kubernetes/'
ssh ${USER}@${HOST} -t 'sudo kubeadm init phase etcd local --config=kubeadmcfg.yaml'
ssh ${USER}@${HOST} -t 'sudo systemctl daemon-reload'
ssh ${USER}@${HOST} -t 'sudo systemctl restart kubelet'
done
```
This will start the static `etcd` pod on each node and then the cluster gets formed.
Generate certificates for the `root` user
```bash
cat > root-csr.json <<EOF
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
```
```bash
cfssl gencert \
-ca=kamaji/etcd/ca.crt \
-ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=client-authentication \
root-csr.json | cfssljson -bare root
```
```bash
cp root.pem kamaji/etcd/root.crt
cp root-key.pem kamaji/etcd/root.key
rm root*
```
The result should be:
```bash
$ tree kamaji
kamaji
└── etcd
├── ca.crt
├── ca.key
├── root.crt
└── root.key
```
Use the `root` user to check the just formed `etcd` cluster is in health state
```bash
export ETCDCTL_CACERT=kamaji/etcd/ca.crt
export ETCDCTL_CERT=kamaji/etcd/root.crt
export ETCDCTL_KEY=kamaji/etcd/root.key
export ETCDCTL_ENDPOINTS=https://${ETCD0}:2379
etcdctl member list -w table
```
The result should be something like this:
```
+------------------+---------+--------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+----------------------------+----------------------------+------------+
| 72657d6307364226 | started | etcd01 | https://192.168.32.11:2380 | https://192.168.32.11:2379 | false |
| 91eb892c5ee87610 | started | etcd00 | https://192.168.32.10:2380 | https://192.168.32.10:2379 | false |
| e9971c576949c34e | started | etcd02 | https://192.168.32.12:2380 | https://192.168.32.12:2379 | false |
+------------------+---------+--------+----------------------------+----------------------------+------------+
```
### Enable multi-tenancy
The `root` user has full access to `etcd` and must be created before enabling authentication. The `root` user must have the `root` role and is allowed to change anything inside `etcd`.
```bash
etcdctl user add --no-password=true root
etcdctl role add root
etcdctl user grant-role root root
etcdctl auth enable
```
### Cleanup
If you want to get rid of the etcd cluster, for each node, login and clean it:
```bash
HOSTS=(${ETCD0} ${ETCD1} ${ETCD2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t 'sudo kubeadm reset -f';
ssh ${USER}@${HOST} -t 'sudo systemctl reboot';
done
```
## Setup internal multi-tenant etcd
If you opted for an internal etcd cluster running in the Kamaji admin cluster, follow steps below.
From the bootstrap machine load the environment for internal `etcd` setup:
```bash
source kamaji-internal-etcd.env
```
### Generate certificates
On the bootstrap machine, using `kubeadm` init phase, create the `etcd` CA certificates:
```bash
sudo kubeadm init phase certs etcd-ca
mkdir kamaji
sudo cp -r /etc/kubernetes/pki/etcd kamaji
sudo chown -R ${USER}. kamaji/etcd
```
Generate the `etcd` certificates for peers:
```
cat << EOF | tee kamaji/etcd/peer-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-0",
"etcd-0.etcd",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1",
"etcd-1.etcd",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2",
"etcd-2.etcd",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/peer-csr.json | cfssljson -bare kamaji/etcd/peer
```
Generate the `etcd` certificates for server:
```
cat << EOF | tee kamaji/etcd/server-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"etcd-server",
"etcd-server.${ETCD_NAMESPACE}.svc",
"etcd-server.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-0.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-1.etcd.${ETCD_NAMESPACE}.svc.cluster.local",
"etcd-2.etcd.${ETCD_NAMESPACE}.svc.cluster.local"
]
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=peer-authentication kamaji/etcd/server-csr.json | cfssljson -bare kamaji/etcd/server
```
Generate certificates for the `root` user of the `etcd`
```
cat << EOF | tee kamaji/etcd/root-csr.json
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cfssl gencert -ca=kamaji/etcd/ca.crt -ca-key=kamaji/etcd/ca.key \
-config=cfssl-cert-config.json \
-profile=client-authentication kamaji/etcd/root-csr.json | cfssljson -bare kamaji/etcd/root
```
Install the `etcd` in the Kamaji admin cluster
```bash
kubectl create namespace ${ETCD_NAMESPACE}
kubectl -n ${ETCD_NAMESPACE} create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key \
--from-file=kamaji/etcd/peer-key.pem --from-file=kamaji/etcd/peer.pem \
--from-file=kamaji/etcd/server-key.pem --from-file=kamaji/etcd/server.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-cluster.yaml
```
Install an `etcd` client to interact with the `etcd` server
```bash
kubectl -n ${ETCD_NAMESPACE} create secret tls root-certs \
--key=kamaji/etcd/root-key.pem \
--cert=kamaji/etcd/root.pem
kubectl -n ${ETCD_NAMESPACE} apply -f etcd/etcd-client.yaml
```
Wait until the etcd instances discover each other and the cluster is formed:
```bash
kubectl -n ${ETCD_NAMESPACE} wait pod --for=condition=ready -l app=etcd --timeout=120s
echo -n "\nChecking endpoint's health..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl endpoint health 1>/dev/null 2>/dev/null; until [ \$$? -eq 0 ]; do sleep 10; printf "."; etcdctl endpoint health 1>/dev/null 2>/dev/null; done;"
echo -n "\netcd cluster's health:\n"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl endpoint health"
echo -n "\nWaiting for all members..."
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "until [ \$$(etcdctl member list 2>/dev/null | wc -l) -eq 3 ]; do sleep 10; printf '.'; done;"
echo -n "\netcd's members:\n"
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- /bin/bash -c "etcdctl member list -w table"
```
### Enable multi-tenancy
The `root` user has full access to `etcd` and must be created before enabling authentication. The `root` user must have the `root` role and is allowed to change anything inside `etcd`.
```bash
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user add --no-password=true root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl role add root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl user grant-role root root
kubectl -n ${ETCD_NAMESPACE} exec etcd-root-client -- etcdctl auth enable
```
## Install Kamaji controller
Currently, the behaviour of the Kamaji controller for Tenant Control Plane is controlled by (in this order):
- CLI flags
- Environment variables
- Configuration file `kamaji.yaml` built into the image
By default, Kamaji searches for the configuration file and uses the parameters found inside of it. If environment variables are passed, they will override the configuration file parameters. Finally, if a CLI flag is also passed, it will override both the environment variables and the configuration file.
There are multiple ways to deploy the Kamaji controller:
- Use the single YAML file installer
- Use Kustomize with Makefile
- Use the Kamaji Helm Chart
The Kamaji controller needs to access the multi-tenant `etcd` in order to provision the access for tenant `kube-apiserver`.
Create the secrets containing the `etcd` certificates
```bash
kubectl create namespace kamaji-system
kubectl -n kamaji-system create secret generic etcd-certs \
--from-file=kamaji/etcd/ca.crt \
--from-file=kamaji/etcd/ca.key
kubectl -n kamaji-system create secret tls root-client-certs \
--cert=kamaji/etcd/root.crt \
--key=kamaji/etcd/root.key
```
### Install with a single manifest
Install with the single YAML file installer:
```bash
kubectl -n kamaji-system apply -f ../config/install.yaml
```
Make sure to patch the `etcd` endpoints of the Kamaji controller, according to your environment:
```bash
cat > patch-deploy.yaml <<EOF
spec:
template:
spec:
containers:
- name: manager
args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
- --etcd-endpoints=${ETCD0}:2379,${ETCD1}:2379,${ETCD2}:2379
EOF
kubectl -n kamaji-system patch \
deployment kamaji-controller-manager \
--patch-file patch-deploy.yaml
```
The Kamaji Tenant Control Plane controller is now running on the Admin Cluster:
```bash
kubectl -n kamaji-system get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
operator-controller-manager 1/1 1 1 14h
```
## Setup Tenant Cluster
Now you are getting an Admin Cluster available to run multiple Tenant Control Planes, deployed by the Kamaji controller. Please, refer to the Kamaji Tenant Deployment [guide](./kamaji-tenant-deployment-guide.md).

View File

@@ -67,6 +67,11 @@ spec:
podCidr: ${TENANT_POD_CIDR}
dnsServiceIPs:
- ${TENANT_DNS_SERVICE}
addons:
coreDNS:
enabled: true
kubeProxy:
enabled: true
EOF
```

View File

@@ -81,3 +81,28 @@ $ make yaml-installation-file
```
It will generate a yaml installation file at `config/installation.yaml`. It should be customized accordingly.
## Tenant Control Planes
### Add-ons
Kamaji provides optional installations into the deployed tenant control plane through add-ons. It is possible to enable or disable them through the `tcp` definition.
By default, add-ons are installed if nothing is specified in the `tcp` definition.
### Core DNS
```yaml
addons:
coreDNS:
enabled: true
```
### Kube-Proxy
```yaml
addons:
kubeProxy:
enabled: true
```

View File

@@ -1,16 +1,16 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package controllers
package e2e
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
@@ -40,8 +40,7 @@ var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
UseExistingCluster: pointer.Bool(true),
}
cfg, err := testEnv.Start()

View File

@@ -0,0 +1,85 @@
package e2e
import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
// Deploy a TenantControlPlane into the existing cluster and assert that the
// Kamaji controller eventually reconciles it to the Ready version status.
var _ = Describe("Deploy a TenantControlPlane resource", func() {
	// Fill TenantControlPlane object
	tcp := kamajiv1alpha1.TenantControlPlane{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tenant-test",
			Namespace: "kamaji-system",
		},
		Spec: kamajiv1alpha1.TenantControlPlaneSpec{
			ControlPlane: kamajiv1alpha1.ControlPlane{
				Deployment: kamajiv1alpha1.DeploymentSpec{
					Replicas: 1,
				},
				Ingress: kamajiv1alpha1.IngressSpec{
					Enabled: true,
				},
				Service: kamajiv1alpha1.ServiceSpec{
					ServiceType: "NodePort",
				},
			},
			NetworkProfile: kamajiv1alpha1.NetworkProfileSpec{
				Address:       "172.18.0.2",
				DNSServiceIPs: []string{"10.96.0.10"},
				Domain:        "clastix.labs",
				PodCIDR:       "10.244.0.0/16",
				Port:          31443,
				ServiceCIDR:   "10.96.0.0/16",
			},
			Kubernetes: kamajiv1alpha1.KubernetesSpec{
				Version: "v1.23.6",
				Kubelet: kamajiv1alpha1.KubeletSpec{
					CGroupFS: "cgroupfs",
				},
				AdmissionControllers: kamajiv1alpha1.AdmissionControllers{
					"LimitRanger",
					"ResourceQuota",
				},
			},
		},
	}
	// Create a TenantControlPlane resource into the cluster
	JustBeforeEach(func() {
		Expect(k8sClient.Create(context.Background(), &tcp)).NotTo(HaveOccurred())
	})
	// Delete the TenantControlPlane resource after test is finished
	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.Background(), &tcp)).Should(Succeed())
	})
	// Check if TenantControlPlane resource has been created and becomes Ready
	It("Should be Ready", func() {
		Eventually(func() kamajiv1alpha1.KubernetesVersionStatus {
			err := k8sClient.Get(context.Background(), types.NamespacedName{
				Name:      tcp.GetName(),
				Namespace: tcp.GetNamespace(),
			}, &tcp)
			if err != nil {
				return ""
			}
			// The version status pointer is nil until the controller first
			// reports a status for this TenantControlPlane: the original
			// `*&tcp.Status...` was a no-op pointer round-trip (SA4001).
			if tcp.Status.Kubernetes.Version.Status == nil {
				return ""
			}

			return *tcp.Status.Kubernetes.Version.Status
		}, 5*time.Minute, time.Second).Should(Equal(kamajiv1alpha1.VersionReady))
	})
})

2
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/clastix/kamaji
go 1.17
go 1.18
require (
github.com/go-logr/logr v1.2.0

313
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -55,6 +55,8 @@ Kubernetes: `>=1.18`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| addons.coreDNS.enabled | boolean | `true` | Enabling CoreDNS installation. If the value is not specified, the installation is enabled |
| addons.kubeProxy.enabled | boolean | `true` | Enabling KubeProxy installation. If the value is not specified, the installation is enabled |
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
| configPath | string | `"./kamaji.yaml"` | Configuration file path alternative. (default "./kamaji.yaml") |
| etcd.caSecret.name | string | `"etcd-certs"` | Name of the secret which contains CA's certificate and private key. (default: "etcd-certs") |
@@ -67,7 +69,7 @@ Kubernetes: `>=1.18`
| fullnameOverride | string | `""` | |
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/clastix/kamaji"` | The container image of the Kamaji controller. |
| image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
| image.tag | string | `"latest"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations | object | `{}` | |

File diff suppressed because it is too large Load Diff

View File

@@ -7,7 +7,7 @@ replicaCount: 1
image:
# -- The container image of the Kamaji controller.
repository: quay.io/clastix/kamaji
repository: clastix/kamaji
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: latest
@@ -134,3 +134,10 @@ temporaryDirectoryPath: "/tmp/kamaji"
loggingDevel:
# -- (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false)
enable: false
# -- Kubernetes Addons
addons:
coreDNS:
enabled: true
kubeProxy:
enabled: true

16
internal/errors/errors.go Normal file
View File

@@ -0,0 +1,16 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package errors
// NonExposedLoadBalancerError signals that the TenantControlPlane Service has
// not yet been published as a LoadBalancer, so no address can be read from it.
type NonExposedLoadBalancerError struct{}

// Error implements the error interface.
func (NonExposedLoadBalancerError) Error() string {
	return "cannot retrieve the TenantControlPlane address, Service resource is not yet exposed as LoadBalancer"
}

// MissingValidIPError signals that the observed resource has no usable IP
// address assigned yet.
type MissingValidIPError struct{}

// Error implements the error interface.
func (MissingValidIPError) Error() string {
	return "the actual resource doesn't have yet a valid IP address"
}

View File

@@ -0,0 +1,10 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package errors
import "github.com/pkg/errors"
// ShouldReconcileErrorBeIgnored reports whether the given reconciliation error
// is a transient condition (LoadBalancer not yet exposed, or no valid IP
// assigned yet) that the caller may safely ignore.
func ShouldReconcileErrorBeIgnored(err error) bool {
	var (
		nonExposed NonExposedLoadBalancerError
		missingIP  MissingValidIPError
	)

	return errors.As(err, &nonExposed) || errors.As(err, &missingIP)
}

View File

@@ -4,12 +4,14 @@
package kubeadm
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api/v1"
@@ -22,11 +24,84 @@ import (
"k8s.io/utils/pointer"
)
func CoreDNSAddon(client kubernetes.Interface, config *Configuration) error {
// Well-known object names used by the kubeadm addon phases inside the tenant
// cluster's kube-system namespace; removals below rely on these fixed names.
const (
	kubeSystemNamespace = "kube-system"
	kubeProxyName       = "kube-proxy"
	coreDNSName         = "coredns"
	kubeDNSName         = "kube-dns"
)
// AddCoreDNS installs the CoreDNS addon into the tenant cluster by delegating
// to the kubeadm DNS phase with the given cluster configuration.
func AddCoreDNS(client kubernetes.Interface, config *Configuration) error {
	clusterConfiguration := &config.InitConfiguration.ClusterConfiguration

	return dns.EnsureDNSAddon(clusterConfiguration, client)
}
func KubeProxyAddon(client kubernetes.Interface, config *Configuration) error {
// RemoveCoreDNSAddon deletes the CoreDNS Service, Deployment, and ConfigMap
// from the tenant cluster. Any non-NotFound deletion failure aborts
// immediately; a NotFound error is tolerated (the object may already be gone)
// and is reported back as the final result.
func RemoveCoreDNSAddon(ctx context.Context, client kubernetes.Interface) error {
	var result error

	removals := []func(context.Context, kubernetes.Interface) error{
		removeCoreDNSService,
		removeCoreDNSDeployment,
		removeCoreDNSConfigMap,
	}

	for _, removal := range removals {
		if err := removal(ctx, client); err != nil {
			if !k8serrors.IsNotFound(err) {
				return err
			}

			result = err
		}
	}

	return result
}
// removeCoreDNSService deletes the CoreDNS Service from the kube-system namespace.
func removeCoreDNSService(ctx context.Context, client kubernetes.Interface) error {
	name, _ := getCoreDNSServiceName(ctx)

	return client.CoreV1().Services(kubeSystemNamespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// removeCoreDNSDeployment deletes the CoreDNS Deployment from the kube-system namespace.
func removeCoreDNSDeployment(ctx context.Context, client kubernetes.Interface) error {
	name, _ := getCoreDNSDeploymentName(ctx)

	return client.AppsV1().Deployments(kubeSystemNamespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// removeCoreDNSConfigMap deletes the CoreDNS ConfigMap from the kube-system namespace.
func removeCoreDNSConfigMap(ctx context.Context, client kubernetes.Interface) error {
	name, _ := getCoreDNSConfigMapName(ctx)

	return client.CoreV1().ConfigMaps(kubeSystemNamespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// getCoreDNSServiceName returns the well-known name of the CoreDNS Service
// ("kube-dns"); ctx and the error return are kept for future lookup logic.
func getCoreDNSServiceName(ctx context.Context) (string, error) {
	// TODO: Currently, DNS is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return kubeDNSName, nil
}
// getCoreDNSDeploymentName returns the well-known name of the CoreDNS
// Deployment ("coredns"); ctx and the error return are kept for future lookup logic.
func getCoreDNSDeploymentName(ctx context.Context) (string, error) {
	// TODO: Currently, DNS is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return coreDNSName, nil
}
// getCoreDNSConfigMapName returns the well-known name of the CoreDNS
// ConfigMap ("coredns"); ctx and the error return are kept for future lookup logic.
func getCoreDNSConfigMapName(ctx context.Context) (string, error) {
	// TODO: Currently, DNS is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return coreDNSName, nil
}
func AddKubeProxy(client kubernetes.Interface, config *Configuration) error {
if err := proxy.CreateServiceAccount(client); err != nil {
return errors.Wrap(err, "error when creating kube-proxy service account")
}
@@ -46,6 +121,95 @@ func KubeProxyAddon(client kubernetes.Interface, config *Configuration) error {
return nil
}
// RemoveKubeProxy deletes the kube-proxy DaemonSet, ConfigMap, and RBAC
// objects from the tenant cluster. Any non-NotFound deletion failure aborts
// immediately; a NotFound error is tolerated (the object may already be gone)
// and is reported back as the final result.
func RemoveKubeProxy(ctx context.Context, client kubernetes.Interface) error {
	var result error

	removals := []func(context.Context, kubernetes.Interface) error{
		removeKubeProxyDaemonSet,
		removeKubeProxyConfigMap,
		removeKubeProxyRBAC,
	}

	for _, removal := range removals {
		if err := removal(ctx, client); err != nil {
			if !k8serrors.IsNotFound(err) {
				return err
			}

			result = err
		}
	}

	return result
}
// removeKubeProxyDaemonSet deletes the kube-proxy DaemonSet from the kube-system namespace.
func removeKubeProxyDaemonSet(ctx context.Context, client kubernetes.Interface) error {
	name, _ := getKubeProxyDaemonSetName(ctx)

	return client.AppsV1().DaemonSets(kubeSystemNamespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// removeKubeProxyConfigMap deletes the kube-proxy ConfigMap from the kube-system namespace.
func removeKubeProxyConfigMap(ctx context.Context, client kubernetes.Interface) error {
	name, _ := getKubeProxyConfigMapName(ctx)

	return client.CoreV1().ConfigMaps(kubeSystemNamespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// removeKubeProxyRBAC deletes the kube-proxy RoleBinding, Role, and
// ServiceAccount (all sharing the same well-known name) from the kube-system
// namespace. Any non-NotFound failure aborts immediately; a NotFound error is
// tolerated and reported back as the final result.
func removeKubeProxyRBAC(ctx context.Context, client kubernetes.Interface) error {
	// TODO: Currently, kube-proxy is installed using kubeadm phases, therefore, name is the same.
	name, _ := getKubeProxyRBACName(ctx)
	opts := metav1.DeleteOptions{}

	var result error

	removals := []func() error{
		func() error { return client.RbacV1().RoleBindings(kubeSystemNamespace).Delete(ctx, name, opts) },
		func() error { return client.RbacV1().Roles(kubeSystemNamespace).Delete(ctx, name, opts) },
		func() error { return client.CoreV1().ServiceAccounts(kubeSystemNamespace).Delete(ctx, name, opts) },
	}

	for _, removal := range removals {
		if err := removal(); err != nil {
			if !k8serrors.IsNotFound(err) {
				return err
			}

			result = err
		}
	}

	return result
}
// getKubeProxyRBACName returns the well-known name shared by the kube-proxy
// RBAC objects ("kube-proxy"); ctx and the error return are kept for future lookup logic.
func getKubeProxyRBACName(ctx context.Context) (string, error) {
	// TODO: Currently, kube-proxy is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return kubeProxyName, nil
}
// getKubeProxyDaemonSetName returns the well-known name of the kube-proxy
// DaemonSet ("kube-proxy"); ctx and the error return are kept for future lookup logic.
func getKubeProxyDaemonSetName(ctx context.Context) (string, error) {
	// TODO: Currently, kube-proxy is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return kubeProxyName, nil
}
// getKubeProxyConfigMapName returns the well-known name of the kube-proxy
// ConfigMap ("kube-proxy"); ctx and the error return are kept for future lookup logic.
func getKubeProxyConfigMapName(ctx context.Context) (string, error) {
	// TODO: Currently, kube-proxy is installed using kubeadm phases, therefore we know the name.
	// Implement a method for future approaches
	return kubeProxyName, nil
}
func createKubeProxyConfigMap(client kubernetes.Interface, config *Configuration) error {
configConf, err := getKubeproxyConfigmapContent(config)
if err != nil {

View File

@@ -37,7 +37,7 @@ func (r *APIServerCertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControl
return false
}
func (r *APIServerCertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *APIServerCertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -37,7 +37,7 @@ func (r *APIServerKubeletClientCertificate) ShouldCleanup(plane *kamajiv1alpha1.
return false
}
func (r *APIServerKubeletClientCertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *APIServerKubeletClientCertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -36,7 +36,7 @@ func (r *CACertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane)
return false
}
func (r *CACertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *CACertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -4,5 +4,6 @@
package resources
const (
defaultIngressPort = 443
kubeconfigAdminKeyName = "admin.conf"
defaultIngressPort = 443
)

View File

@@ -43,7 +43,7 @@ func (r *ETCDCACertificatesResource) ShouldCleanup(plane *kamajiv1alpha1.TenantC
return false
}
func (r *ETCDCACertificatesResource) CleanUp(ctx context.Context) (bool, error) {
func (r *ETCDCACertificatesResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -40,7 +40,7 @@ func (r *ETCDCertificatesResource) ShouldCleanup(plane *kamajiv1alpha1.TenantCon
return false
}
func (r *ETCDCertificatesResource) CleanUp(ctx context.Context) (bool, error) {
func (r *ETCDCertificatesResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -52,7 +52,7 @@ func (r *ETCDSetupResource) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPla
return false
}
func (r *ETCDSetupResource) CleanUp(ctx context.Context) (bool, error) {
func (r *ETCDSetupResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -37,7 +37,7 @@ func (r *FrontProxyClientCertificate) ShouldCleanup(plane *kamajiv1alpha1.Tenant
return false
}
func (r *FrontProxyClientCertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *FrontProxyClientCertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -36,7 +36,7 @@ func (r *FrontProxyCACertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantCont
return false
}
func (r *FrontProxyCACertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *FrontProxyCACertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -48,7 +48,7 @@ func (r *KubernetesDeploymentResource) ShouldCleanup(plane *kamajiv1alpha1.Tenan
return false
}
func (r *KubernetesDeploymentResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubernetesDeploymentResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -33,7 +33,7 @@ func (r *KubernetesIngressResource) ShouldCleanup(tenantControlPlane *kamajiv1al
return !tenantControlPlane.Spec.ControlPlane.Ingress.Enabled
}
func (r *KubernetesIngressResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubernetesIngressResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
if err := r.Client.Delete(ctx, r.resource); err != nil {
if !k8serrors.IsNotFound(err) {
return false, err

View File

@@ -34,7 +34,7 @@ func (r *KubernetesServiceResource) ShouldCleanup(plane *kamajiv1alpha1.TenantCo
return false
}
func (r *KubernetesServiceResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubernetesServiceResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -0,0 +1,185 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package resources
import (
"context"
"fmt"
"github.com/go-logr/logr"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiapi "github.com/clastix/kamaji/api"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/kubeadm"
)
// KubeadmAddon enumerates the kubeadm addons managed for a tenant
// control plane.
type KubeadmAddon int

const (
	// AddonCoreDNS identifies the CoreDNS kubeadm addon.
	AddonCoreDNS KubeadmAddon = iota
	// AddonKubeProxy identifies the kube-proxy kubeadm addon.
	AddonKubeProxy
)

// String implements fmt.Stringer.
//
// A switch is used instead of indexing a fixed array so that an
// out-of-range value — exactly what the `default:` error paths below
// format with %s — yields a readable diagnostic instead of an index
// panic (which fmt would render as %!s(PANIC=...)).
func (d KubeadmAddon) String() string {
	switch d {
	case AddonCoreDNS:
		return "PhaseAddonCoreDNS"
	case AddonKubeProxy:
		return "PhaseAddonKubeProxy"
	default:
		return fmt.Sprintf("KubeadmAddon(%d)", int(d))
	}
}
// KubeadmAddonResource reconciles a single kubeadm addon (CoreDNS or
// kube-proxy) for a tenant control plane. It satisfies the Resource and
// KubeadmPhaseResource interfaces used by the reconciliation handler.
type KubeadmAddonResource struct {
// Client is the management-cluster client.
Client client.Client
Log logr.Logger
// Name is the human-readable resource name reported by GetName.
Name string
// KubeadmAddon selects which addon this instance manages.
KubeadmAddon KubeadmAddon
// kubeadmConfigResourceVersion caches the kubeadm ConfigMap resource
// version observed during the last reconciliation; it is written back to
// the status by UpdateTenantControlPlaneStatus.
kubeadmConfigResourceVersion string
}
// isStatusEqual reports whether the addon's desired state (spec.Enabled)
// already matches the enabled flag recorded in the status. Any error while
// fetching the spec or status, or a status of an unexpected type, is
// treated as "not equal" so that reconciliation proceeds.
func (r *KubeadmAddonResource) isStatusEqual(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
addonSpec, err := r.getSpec(tenantControlPlane)
if err != nil {
return false
}
i, err := r.GetStatus(tenantControlPlane)
if err != nil {
return false
}
// GetStatus returns an interface; narrow it to the concrete addon status.
addonStatus, ok := i.(*kamajiv1alpha1.AddonStatus)
if !ok {
return false
}
// NOTE(review): assumes addonSpec.Enabled is non-nil — presumably
// defaulted by the API; confirm, otherwise this dereference panics.
return *addonSpec.Enabled == addonStatus.Enabled
}
// SetKubeadmConfigResourceVersion records the kubeadm ConfigMap resource
// version observed during the current reconciliation, for later persistence
// into the TenantControlPlane status.
func (r *KubeadmAddonResource) SetKubeadmConfigResourceVersion(rv string) {
	r.kubeadmConfigResourceVersion = rv
}
// ShouldStatusBeUpdated reports whether the TenantControlPlane status is out
// of sync with the addon's desired state and must be rewritten.
func (r *KubeadmAddonResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
	return !r.isStatusEqual(tenantControlPlane)
}
// ShouldCleanup reports whether the addon must be removed from the tenant
// cluster, i.e. when the spec explicitly disables it. Errors fetching the
// spec are treated as "do not clean up".
func (r *KubeadmAddonResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
spec, err := r.getSpec(tenantControlPlane)
if err != nil {
return false
}
// NOTE(review): assumes spec.Enabled is non-nil — presumably defaulted by
// the API; confirm, otherwise this dereference panics.
return !*spec.Enabled
}
// CleanUp removes the addon from the tenant cluster. It reports true only
// when something was actually deleted; a NotFound error from the removal
// routine means there was nothing to remove and yields (false, nil).
func (r *KubeadmAddonResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
	// clientSet (not "client") avoids shadowing the imported client package.
	clientSet, err := GetRESTClient(ctx, r, tenantControlPlane)
	if err != nil {
		return false, err
	}

	removeFn, err := r.getRemoveAddonFunction()
	if err != nil {
		return false, err
	}

	if err = removeFn(ctx, clientSet); err != nil {
		if k8serrors.IsNotFound(err) {
			return false, nil
		}

		return false, err
	}

	return true, nil
}
// Define is a no-op: addon resources have no local state to precompute
// before reconciliation. Present to satisfy the Resource interface.
func (r *KubeadmAddonResource) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
	return nil
}
// GetKubeadmFunction returns the kubeadm routine that installs the addon
// handled by this resource, or an error for an unknown addon.
func (r *KubeadmAddonResource) GetKubeadmFunction() (func(clientset.Interface, *kubeadm.Configuration) error, error) {
	switch r.KubeadmAddon {
	case AddonKubeProxy:
		return kubeadm.AddKubeProxy, nil
	case AddonCoreDNS:
		return kubeadm.AddCoreDNS, nil
	}

	return nil, fmt.Errorf("no available functionality for phase %s", r.KubeadmAddon)
}
// getRemoveAddonFunction returns the routine that uninstalls the addon
// handled by this resource, or an error for an unknown addon.
func (r *KubeadmAddonResource) getRemoveAddonFunction() (func(context.Context, clientset.Interface) error, error) {
	switch r.KubeadmAddon {
	case AddonKubeProxy:
		return kubeadm.RemoveKubeProxy, nil
	case AddonCoreDNS:
		return kubeadm.RemoveCoreDNSAddon, nil
	}

	return nil, fmt.Errorf("no available functionality for removing addon %s", r.KubeadmAddon)
}
// GetClient returns the management-cluster client used by the shared
// kubeadm-phase helpers.
func (r *KubeadmAddonResource) GetClient() client.Client {
	return r.Client
}
// GetTmpDirectory returns the scratch directory for kubeadm operations;
// addon resources need none, hence the empty string.
func (r *KubeadmAddonResource) GetTmpDirectory() string {
	return ""
}
// GetName returns the human-readable name of this resource, used by the
// reconciliation handler for reporting.
func (r *KubeadmAddonResource) GetName() string {
	return r.Name
}
// UpdateTenantControlPlaneStatus writes the addon's current state into the
// TenantControlPlane status: the enabled flag taken from the spec, the time
// of this update, and the kubeadm ConfigMap resource version cached during
// reconciliation. The status object returned by GetStatus is a pointer into
// tenantControlPlane, so mutating it updates the object in place.
func (r *KubeadmAddonResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
i, err := r.GetStatus(tenantControlPlane)
if err != nil {
return err
}
addonSpec, err := r.getSpec(tenantControlPlane)
if err != nil {
return err
}
// Narrow the interface returned by GetStatus to the concrete addon status.
status, ok := i.(*kamajiv1alpha1.AddonStatus)
if !ok {
return fmt.Errorf("error addon status")
}
// NOTE(review): assumes addonSpec.Enabled is non-nil — presumably
// defaulted by the API; confirm, otherwise this dereference panics.
status.Enabled = *addonSpec.Enabled
status.LastUpdate = metav1.Now()
status.KubeadmConfigResourceVersion = r.kubeadmConfigResourceVersion
return nil
}
// GetStatus returns a pointer to the addon status tracked in the
// TenantControlPlane, so callers can mutate it in place. An unknown addon
// yields an error.
func (r *KubeadmAddonResource) GetStatus(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (kamajiapi.KubeadmConfigResourceVersionDependant, error) {
	addons := &tenantControlPlane.Status.Addons

	switch r.KubeadmAddon {
	case AddonKubeProxy:
		return &addons.KubeProxy, nil
	case AddonCoreDNS:
		return &addons.CoreDNS, nil
	}

	return nil, fmt.Errorf("%s has no addon status", r.KubeadmAddon)
}
// getSpec returns a pointer to the addon's spec section in the
// TenantControlPlane. An unknown addon yields an error.
func (r *KubeadmAddonResource) getSpec(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*kamajiv1alpha1.AddonSpec, error) {
	addons := &tenantControlPlane.Spec.Addons

	switch r.KubeadmAddon {
	case AddonKubeProxy:
		return &addons.KubeProxy, nil
	case AddonCoreDNS:
		return &addons.CoreDNS, nil
	}

	return nil, fmt.Errorf("%s has no spec", r.KubeadmAddon)
}
// CreateOrUpdate reconciles the addon by delegating to the shared
// kubeadm-phase runner, which installs the addon when the kubeadm ConfigMap
// resource version has changed.
func (r *KubeadmAddonResource) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
	return KubeadmPhaseCreate(ctx, r, tenantControlPlane)
}

View File

@@ -51,7 +51,7 @@ func (r *KubeadmConfigResource) ShouldCleanup(plane *kamajiv1alpha1.TenantContro
return false
}
func (r *KubeadmConfigResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubeadmConfigResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -6,86 +6,81 @@ package resources
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiapi "github.com/clastix/kamaji/api"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/kubeadm"
kubeconfigutil "github.com/clastix/kamaji/internal/kubeconfig"
)
type kubeadmPhase int
const kubeadmPhaseTimeout = 10 // seconds
type KubeadmPhase int
const (
PhaseUploadConfigKubeadm KubeadmPhase = iota
PhaseUploadConfigKubeadm kubeadmPhase = iota
PhaseUploadConfigKubelet
PhaseAddonCoreDNS
PhaseAddonKubeProxy
PhaseBootstrapToken
)
func (d KubeadmPhase) String() string {
func (d kubeadmPhase) String() string {
return [...]string{"PhaseUploadConfigKubeadm", "PhaseUploadConfigKubelet", "PhaseAddonCoreDNS", "PhaseAddonKubeProxy", "PhaseBootstrapToken"}[d]
}
const (
kubeconfigAdminKeyName = "admin.conf"
)
type KubeadmPhaseResource struct {
type KubeadmPhase struct {
Client client.Client
Log logr.Logger
Name string
KubeadmPhase KubeadmPhase
Phase kubeadmPhase
kubeadmConfigResourceVersion string
}
func (r *KubeadmPhaseResource) isStatusEqual(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
status, err := r.getStatus(tenantControlPlane)
func (r *KubeadmPhase) isStatusEqual(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
i, err := r.GetStatus(tenantControlPlane)
if err != nil {
return true
}
status, ok := i.(*kamajiv1alpha1.KubeadmPhaseStatus)
if !ok {
return false
}
return status.KubeadmConfigResourceVersion == r.kubeadmConfigResourceVersion
}
func (r *KubeadmPhaseResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
func (r *KubeadmPhase) SetKubeadmConfigResourceVersion(rv string) {
r.kubeadmConfigResourceVersion = rv
}
func (r *KubeadmPhase) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return !r.isStatusEqual(tenantControlPlane)
}
func (r *KubeadmPhaseResource) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane) bool {
func (r *KubeadmPhase) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return false
}
func (r *KubeadmPhaseResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubeadmPhase) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}
func (r *KubeadmPhaseResource) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
func (r *KubeadmPhase) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
return nil
}
func (r *KubeadmPhaseResource) getKubeadmPhaseFunction() (func(clientset.Interface, *kubeadm.Configuration) error, error) {
switch r.KubeadmPhase {
func (r *KubeadmPhase) GetKubeadmFunction() (func(clientset.Interface, *kubeadm.Configuration) error, error) {
switch r.Phase {
case PhaseUploadConfigKubeadm:
return kubeadm.UploadKubeadmConfig, nil
case PhaseUploadConfigKubelet:
return kubeadm.UploadKubeletConfig, nil
case PhaseAddonCoreDNS:
return kubeadm.CoreDNSAddon, nil
case PhaseAddonKubeProxy:
return kubeadm.KubeProxyAddon, nil
case PhaseBootstrapToken:
return func(client clientset.Interface, config *kubeadm.Configuration) error {
bootstrapTokensEnrichment(config.InitConfiguration.BootstrapTokens)
@@ -93,7 +88,7 @@ func (r *KubeadmPhaseResource) getKubeadmPhaseFunction() (func(clientset.Interfa
return kubeadm.BootstrapToken(client, config)
}, nil
default:
return nil, fmt.Errorf("no available functionality for phase %s", r.KubeadmPhase)
return nil, fmt.Errorf("no available functionality for phase %s", r.Phase)
}
}
@@ -117,155 +112,48 @@ func enrichBootstrapToken(bootstrapToken *bootstraptokenv1.BootstrapToken) {
}
}
func (r *KubeadmPhaseResource) GetClient() client.Client {
func (r *KubeadmPhase) GetClient() client.Client {
return r.Client
}
func (r *KubeadmPhaseResource) GetTmpDirectory() string {
func (r *KubeadmPhase) GetTmpDirectory() string {
return ""
}
func (r *KubeadmPhaseResource) GetName() string {
func (r *KubeadmPhase) GetName() string {
return r.Name
}
func (r *KubeadmPhaseResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
status, err := r.getStatus(tenantControlPlane)
func (r *KubeadmPhase) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
i, err := r.GetStatus(tenantControlPlane)
if err != nil {
return err
}
status.LastUpdate = metav1.Now()
status.KubeadmConfigResourceVersion = r.kubeadmConfigResourceVersion
kubeadmStatus, ok := i.(*kamajiv1alpha1.KubeadmPhaseStatus)
if !ok {
return fmt.Errorf("error status kubeadm phase")
}
kubeadmStatus.LastUpdate = metav1.Now()
kubeadmStatus.KubeadmConfigResourceVersion = r.kubeadmConfigResourceVersion
return nil
}
func (r *KubeadmPhaseResource) getStatus(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*kamajiv1alpha1.KubeadmPhaseStatus, error) {
switch r.KubeadmPhase {
func (r *KubeadmPhase) GetStatus(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (kamajiapi.KubeadmConfigResourceVersionDependant, error) {
switch r.Phase {
case PhaseUploadConfigKubeadm:
return &tenantControlPlane.Status.KubeadmPhase.UploadConfigKubeadm, nil
case PhaseUploadConfigKubelet:
return &tenantControlPlane.Status.KubeadmPhase.UploadConfigKubelet, nil
case PhaseAddonCoreDNS:
return &tenantControlPlane.Status.KubeadmPhase.AddonCoreDNS, nil
case PhaseAddonKubeProxy:
return &tenantControlPlane.Status.KubeadmPhase.AddonKubeProxy, nil
case PhaseBootstrapToken:
return &tenantControlPlane.Status.KubeadmPhase.BootstrapToken, nil
default:
return nil, fmt.Errorf("%s is not a right kubeadm phase", r.KubeadmPhase)
return nil, fmt.Errorf("%s is not a right kubeadm phase", r.Phase)
}
}
func (r *KubeadmPhaseResource) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
return r.reconcile(ctx, tenantControlPlane)
}
func (r *KubeadmPhaseResource) reconcile(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
config, resourceVersion, err := getKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
kubeconfig, err := r.getKubeconfig(ctx, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
config.Kubeconfig = *kubeconfig
config.Parameters = kubeadm.Parameters{
TenantControlPlaneName: tenantControlPlane.GetName(),
TenantDNSServiceIPs: tenantControlPlane.Spec.NetworkProfile.DNSServiceIPs,
TenantControlPlaneVersion: tenantControlPlane.Spec.Kubernetes.Version,
TenantControlPlanePodCIDR: tenantControlPlane.Spec.NetworkProfile.PodCIDR,
TenantControlPlaneAddress: tenantControlPlane.Spec.NetworkProfile.Address,
TenantControlPlanePort: tenantControlPlane.Spec.NetworkProfile.Port,
TenantControlPlaneCGroupDriver: tenantControlPlane.Spec.Kubernetes.Kubelet.CGroupFS.String(),
}
status, err := r.getStatus(tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
if resourceVersion == status.KubeadmConfigResourceVersion {
r.kubeadmConfigResourceVersion = resourceVersion
return controllerutil.OperationResultNone, nil
}
client, err := r.getRESTClient(ctx, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
fun, err := r.getKubeadmPhaseFunction()
if err != nil {
return controllerutil.OperationResultNone, err
}
if err = fun(client, config); err != nil {
return controllerutil.OperationResultNone, err
}
r.kubeadmConfigResourceVersion = resourceVersion
if status.LastUpdate.IsZero() {
return controllerutil.OperationResultCreated, nil
}
return controllerutil.OperationResultUpdated, nil
}
func (r *KubeadmPhaseResource) getKubeconfigSecret(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*corev1.Secret, error) {
kubeconfigSecretName := tenantControlPlane.Status.KubeConfig.Admin.SecretName
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: kubeconfigSecretName}
secret := &corev1.Secret{}
if err := r.Client.Get(ctx, namespacedName, secret); err != nil {
return nil, err
}
return secret, nil
}
func (r *KubeadmPhaseResource) getKubeconfig(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*kubeconfigutil.Kubeconfig, error) {
secretKubeconfig, err := r.getKubeconfigSecret(ctx, tenantControlPlane)
if err != nil {
return nil, err
}
bytes, ok := secretKubeconfig.Data[kubeconfigAdminKeyName]
if !ok {
return nil, fmt.Errorf("%s is not into kubeconfig secret", kubeconfigAdminKeyName)
}
return kubeconfigutil.GetKubeconfigFromBytes(bytes)
}
func (r *KubeadmPhaseResource) getRESTClient(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*clientset.Clientset, error) {
config, err := r.getRESTClientConfig(ctx, tenantControlPlane)
if err != nil {
return nil, err
}
return clientset.NewForConfig(config)
}
func (r *KubeadmPhaseResource) getRESTClientConfig(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*restclient.Config, error) {
kubeconfig, err := r.getKubeconfig(ctx, tenantControlPlane)
if err != nil {
return nil, err
}
config := &restclient.Config{
Host: fmt.Sprintf("https://%s:%d", getTenantControllerInternalFQDN(*tenantControlPlane), tenantControlPlane.Spec.NetworkProfile.Port),
TLSClientConfig: restclient.TLSClientConfig{
CAData: kubeconfig.Clusters[0].Cluster.CertificateAuthorityData,
CertData: kubeconfig.AuthInfos[0].AuthInfo.ClientCertificateData,
KeyData: kubeconfig.AuthInfos[0].AuthInfo.ClientKeyData,
},
Timeout: time.Second * kubeadmPhaseTimeout,
}
return config, nil
func (r *KubeadmPhase) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
return KubeadmPhaseCreate(ctx, r, tenantControlPlane)
}

View File

@@ -48,7 +48,7 @@ func (k *KubernetesUpgrade) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bo
return false
}
func (k *KubernetesUpgrade) CleanUp(context.Context) (bool, error) {
func (k *KubernetesUpgrade) CleanUp(context.Context, *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -0,0 +1,126 @@
package resources
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/kubeadm"
kubeconfigutil "github.com/clastix/kamaji/internal/kubeconfig"
)
// KubeadmPhaseCreate runs the kubeadm routine exposed by the given phase
// resource against the tenant cluster, skipping the call entirely when the
// kubeadm ConfigMap resource version has not changed since the last run.
//
// It returns OperationResultNone when nothing had to be done (or on error),
// OperationResultCreated on the first successful run (no resource version
// stored in the status yet), and OperationResultUpdated otherwise.
func KubeadmPhaseCreate(ctx context.Context, r KubeadmPhaseResource, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
config, resourceVersion, err := getKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
// Admin kubeconfig of the tenant cluster, read from the Secret referenced
// in the TenantControlPlane status.
kubeconfig, err := getKubeconfig(ctx, r, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
config.Kubeconfig = *kubeconfig
config.Parameters = kubeadm.Parameters{
TenantControlPlaneName: tenantControlPlane.GetName(),
TenantDNSServiceIPs: tenantControlPlane.Spec.NetworkProfile.DNSServiceIPs,
TenantControlPlaneVersion: tenantControlPlane.Spec.Kubernetes.Version,
TenantControlPlanePodCIDR: tenantControlPlane.Spec.NetworkProfile.PodCIDR,
TenantControlPlaneAddress: tenantControlPlane.Spec.NetworkProfile.Address,
TenantControlPlanePort: tenantControlPlane.Spec.NetworkProfile.Port,
TenantControlPlaneCGroupDriver: tenantControlPlane.Spec.Kubernetes.Kubelet.CGroupFS.String(),
}
status, err := r.GetStatus(tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
// Fast path: the kubeadm ConfigMap has not changed since the stored
// version, so the phase does not need to run again; still cache the
// version so the status update remains consistent.
storedResourceVersion := status.GetKubeadmConfigResourceVersion()
if resourceVersion == storedResourceVersion {
r.SetKubeadmConfigResourceVersion(resourceVersion)
return controllerutil.OperationResultNone, nil
}
client, err := GetRESTClient(ctx, r, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}
fun, err := r.GetKubeadmFunction()
if err != nil {
return controllerutil.OperationResultNone, err
}
if err = fun(client, config); err != nil {
return controllerutil.OperationResultNone, err
}
// Record the version only after the phase succeeded, so a failed run is
// retried on the next reconciliation.
r.SetKubeadmConfigResourceVersion(resourceVersion)
// An empty stored version means this phase had never completed before.
if storedResourceVersion == "" {
return controllerutil.OperationResultCreated, nil
}
return controllerutil.OperationResultUpdated, nil
}
// getKubeconfigSecret fetches the Secret holding the tenant admin
// kubeconfig, as referenced by the TenantControlPlane status.
func getKubeconfigSecret(ctx context.Context, r KubeadmPhaseResource, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*corev1.Secret, error) {
	secret := &corev1.Secret{}
	namespacedName := k8stypes.NamespacedName{
		Namespace: tenantControlPlane.GetNamespace(),
		Name:      tenantControlPlane.Status.KubeConfig.Admin.SecretName,
	}

	if err := r.GetClient().Get(ctx, namespacedName, secret); err != nil {
		return nil, err
	}

	return secret, nil
}
// getKubeconfig loads and parses the tenant admin kubeconfig from its
// Secret, erroring out when the expected key is missing.
func getKubeconfig(ctx context.Context, r KubeadmPhaseResource, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*kubeconfigutil.Kubeconfig, error) {
	secret, err := getKubeconfigSecret(ctx, r, tenantControlPlane)
	if err != nil {
		return nil, err
	}

	payload, ok := secret.Data[kubeconfigAdminKeyName]
	if !ok {
		return nil, fmt.Errorf("%s is not into kubeconfig secret", kubeconfigAdminKeyName)
	}

	return kubeconfigutil.GetKubeconfigFromBytes(payload)
}
// GetRESTClient builds a typed clientset for the tenant cluster, using the
// REST configuration derived from the tenant admin kubeconfig.
func GetRESTClient(ctx context.Context, r KubeadmPhaseResource, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*clientset.Clientset, error) {
	restConfig, err := getRESTClientConfig(ctx, r, tenantControlPlane)
	if err != nil {
		return nil, err
	}

	return clientset.NewForConfig(restConfig)
}
// getRESTClientConfig derives a rest.Config for the tenant cluster from the
// admin kubeconfig: the first cluster/auth-info entries provide the TLS
// material, and the host targets the tenant controller's internal FQDN.
func getRESTClientConfig(ctx context.Context, r KubeadmPhaseResource, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*restclient.Config, error) {
	kubeconfig, err := getKubeconfig(ctx, r, tenantControlPlane)
	if err != nil {
		return nil, err
	}

	cluster := kubeconfig.Clusters[0].Cluster
	authInfo := kubeconfig.AuthInfos[0].AuthInfo

	restConfig := &restclient.Config{
		Host: fmt.Sprintf("https://%s:%d", getTenantControllerInternalFQDN(*tenantControlPlane), tenantControlPlane.Spec.NetworkProfile.Port),
		TLSClientConfig: restclient.TLSClientConfig{
			CAData:   cluster.CertificateAuthorityData,
			CertData: authInfo.ClientCertificateData,
			KeyData:  authInfo.ClientKeyData,
		},
		Timeout: time.Second * kubeadmPhaseTimeout,
	}

	return restConfig, nil
}

View File

@@ -47,7 +47,7 @@ func (r *KubeconfigResource) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPl
return false
}
func (r *KubeconfigResource) CleanUp(ctx context.Context) (bool, error) {
func (r *KubeconfigResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}

View File

@@ -8,9 +8,11 @@ import (
corev1 "k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiapi "github.com/clastix/kamaji/api"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/kubeadm"
)
@@ -18,7 +20,7 @@ import (
type Resource interface {
Define(context.Context, *kamajiv1alpha1.TenantControlPlane) error
ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool
CleanUp(context.Context) (bool, error)
CleanUp(context.Context, *kamajiv1alpha1.TenantControlPlane) (bool, error)
CreateOrUpdate(context.Context, *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error)
GetName() string
ShouldStatusBeUpdated(context.Context, *kamajiv1alpha1.TenantControlPlane) bool
@@ -35,6 +37,14 @@ type KubeadmResource interface {
GetTmpDirectory() string
}
type KubeadmPhaseResource interface {
KubeadmResource
GetClient() client.Client
GetKubeadmFunction() (func(clientset.Interface, *kubeadm.Configuration) error, error)
GetStatus(*kamajiv1alpha1.TenantControlPlane) (kamajiapi.KubeadmConfigResourceVersionDependant, error)
SetKubeadmConfigResourceVersion(string)
}
type HandlerConfig struct {
Resource Resource
TenantControlPlane *kamajiv1alpha1.TenantControlPlane
@@ -50,7 +60,7 @@ func Handle(ctx context.Context, resource Resource, tenantControlPlane *kamajiv1
return createOrUpdate(ctx, resource, tenantControlPlane)
}
cleanUp, err := resource.CleanUp(ctx)
cleanUp, err := resource.CleanUp(ctx, tenantControlPlane)
if err != nil {
return controllerutil.OperationResultNone, err
}

View File

@@ -36,7 +36,7 @@ func (r *SACertificate) ShouldCleanup(plane *kamajiv1alpha1.TenantControlPlane)
return false
}
func (r *SACertificate) CleanUp(ctx context.Context) (bool, error) {
func (r *SACertificate) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
return false, nil
}