Compare commits
139 Commits
edge-24.12...edge-25.7
SHA1: 09c9743465, 8290e84c3f, 678aca6229, d6a94dfa5e, 3fd1882e43, d40996daa9, b5956e43a5, c97767b54f, 464984f091, d7b21b5814,
3230a70475, b0c9034994, 22f9c36b15, d5d0295736, 80afd43c9f, 32ef65820d, eeb12c232b, ce8d5f2516, 1ac72ff22f, 501bd7a7ca,
ba3249f220, c156322fe3, ca622ef9ae, a9c324e2e5, 95a32ee5d4, 9874700b28, 994162c5b0, 8ba99bd6c6, 16438ebed8, f750073af6,
6b10c89d2f, 8bd1f53568, 9db4ccc5f1, fd49c238f5, b53adbfd6e, 994ca7687d, c2bb50933a, b027e23b99, 728ac21ffa, 4595b79ddd,
335ecfbe27, fd8ffeb607, 4cdfcc1347, 7c785726d9, 69141e5765, 223aa6d4c9, 2204fdad63, 880a392887, 2ab9dc3949, f87a057809,
3e79845175, c769226e79, 97d87b6a56, b68010e072, dc18f27948, d3f75feb12, 94a64d1f75, ec523d3490, 20b8a3aca0, 37c548bf8d,
0ebbdae4f8, ec443e6eac, b2ec531183, 0f3de13d26, dd099e750f, aab2250e8d, 13243c984a, df3a906bcf, 33664d7e40, 8b22f22bd3,
05aad8ce56, c91b4b3674, f64953c411, 751854b310, 620647b2da, a8f8582ea6, 7dceac8dc6, ad7c3b71e7, 989dcff863, 09a5b05a9c,
e2f4dd0dce, 7f7f649c7f, e38979a443, c87d6ffc47, 22a40409f2, e7df0f15d8, 4f70df8b61, 6a6c83a1c6, b0cbec9d3e, 3098279911,
d1d092505b, db2ccf1c9f, e19c33337f, b42ee8f1ad, 9d48eaecb3, f7eb53ccc0, d5ed4db445, 38652260b5, b231575940, 899da1aec4,
3de661b4e6, 2391286d4a, 1e9e6e497a, 34ff302ea2, 54e6428715, 2f5ba4820a, c69a6079d1, c3d8b959e1, 0c1c2535a8, c9547220bf,
abfc65a546, f238820c0d, 521fbf98bb, dedfbb136b, 3b813ad02f, 339d6497ba, 3d0cfddefa, e154611090, 1ddaeb3aae, d3580c8bc1,
cb58ad680d, 3c5a7af78a, b721dce799, 1d72802abd, f82350f17b, 68d581abc7, af2b55d47b, f9a0436a42, 4b90a67e4b, 9cbe3b1f2b,
f29e2195d3, 8dd805712b, ae7aa54e43, c1dd106680, 21c299bdda, 0390fca416, 7824b29df8, 1556adb8fa, 813062f345
21 .github/workflows/ci.yaml (vendored)
@@ -7,6 +7,15 @@ on:
    branches: [ "*" ]

jobs:
  test:
    name: integration
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
      - run: make test
  golangci:
    name: lint
    runs-on: ubuntu-22.04
@@ -16,11 +25,13 @@ jobs:
        with:
          go-version-file: go.mod
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v6.1.1
        with:
          version: v1.62.2
          only-new-issues: false
          args: --timeout 5m --config .golangci.yml
        run: make golint
        # TODO(prometherion): enable back once golangci-lint is built from v1.24 rather than v1.23
        # uses: golangci/golangci-lint-action@v6.5.2
        # with:
        #   version: v1.62.2
        #   only-new-issues: false
        #   args: --config .golangci.yml
  diff:
    name: diff
    runs-on: ubuntu-22.04
3 .github/workflows/e2e.yaml (vendored)
@@ -33,7 +33,7 @@ on:
jobs:
  kind:
    name: Kubernetes
    runs-on: ubuntu-22.04
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
@@ -46,5 +46,6 @@ jobs:
          sudo apt-get update
          sudo apt-get install -y golang-cfssl
          sudo swapoff -a
          sudo modprobe br_netfilter
      - name: e2e testing
        run: make e2e
6 .github/workflows/helm.yaml (vendored)
@@ -2,8 +2,7 @@ name: Helm Chart

on:
  push:
    branches: [ "*" ]
    tags: [ "helm-v*" ]
    branches: [ "master" ]
  pull_request:
    branches: [ "*" ]

@@ -32,7 +31,8 @@ jobs:
      - name: Linting Chart
        run: helm lint ./charts/kamaji
  release:
    if: startsWith(github.ref, 'refs/tags/helm-v')
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    needs: [ "lint", "diff" ]
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
23 .github/workflows/pr.yaml (vendored, new file)
@@ -0,0 +1,23 @@
name: Check PR Title

on:
  pull_request:
    types: [opened, edited, reopened, synchronize]

jobs:
  semantic-pr-title:
    runs-on: ubuntu-22.04
    steps:
      - uses: amannn/action-semantic-pull-request@v5
        with:
          types: |
            feat
            fix
            chore
            docs
            style
            refactor
            perf
            test
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
114 .golangci.yml
@@ -1,50 +1,76 @@
linters-settings:
  revive:
    rules:
      - name: dot-imports
        arguments:
          - allowedPackages:
              - "github.com/onsi/ginkgo/v2"
              - "github.com/onsi/gomega"
  gci:
    sections:
      - standard
      - default
      - prefix(github.com/clastix/kamaji/)
  goheader:
    template: |-
      Copyright 2022 Clastix Labs
      SPDX-License-Identifier: Apache-2.0

version: "2"
linters:
  default: all
  disable:
    - depguard
    - wrapcheck
    - mnd
    - varnamelen
    - testpackage
    - tagliatelle
    - paralleltest
    - ireturn
    - err113
    - gochecknoglobals
    - wsl
    - exhaustive
    - nosprintfhostport
    - nonamedreturns
    - interfacebloat
    - exhaustruct
    - lll
    - gosec
    - gomoddirectives
    - godox
    - gochecknoinits
    - funlen
    - dupl
    - cyclop
    - depguard
    - dupl
    - err113
    - exhaustive
    - exhaustruct
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - godox
    - gomoddirectives
    - gosec
    - interfacebloat
    - ireturn
    - lll
    - mnd
    - nestif
    - nonamedreturns
    - nosprintfhostport
    - paralleltest
    - perfsprint
    # deprecated linters
    - exportloopref
  enable-all: true
    - tagliatelle
    - testpackage
    - varnamelen
    - wrapcheck
    - wsl
  settings:
    staticcheck:
      checks:
        - all
        - -QF1008
    goheader:
      template: |-
        Copyright 2022 Clastix Labs
        SPDX-License-Identifier: Apache-2.0
    revive:
      rules:
        - name: dot-imports
          arguments:
            - allowedPackages:
                - github.com/onsi/ginkgo/v2
                - github.com/onsi/gomega
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  enable:
    - gci
    - gofmt
    - gofumpt
    - goimports
  settings:
    gci:
      sections:
        - standard
        - default
        - prefix(github.com/clastix/kamaji/)
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
10 ADOPTERS.md
@@ -8,9 +8,13 @@ Feel free to open a Pull-Request to get yours listed.
| Type | Name | Since | Website | Use-Case |
|:-|:-|:-|:-|:-|
| Vendor | Aknostic | 2023 | [link](https://aknostic.com) | Aknostic is a cloud-native consultancy company using Kamaji to build a Kubernetes based PaaS. |
| R&D | Aruba | 2024 | [link](https://www.aruba.it/home.aspx) | Aruba Cloud is an Italian Cloud Service Provider evaluating Kamaji to build and offer [Managed Kubernetes Service](https://my.arubacloud.com). |
| Vendor | CBWS | 2025 | [link](https://cbws.nl) | CBWS is an European Cloud Provider using Kamaji to build and offer their [Managed Kubernetes Service](https://cbws.nl/cloud/kubernetes/). |
| Vendor | DCloud | 2024 | [link](https://dcloud.co.id) | DCloud is an Indonesian Cloud Provider using Kamaji to build and offer [Managed Kubernetes Service](https://dcloud.co.id/dkubes.html). |
| Vendor | Dinova | 2025 | [link](https://dinova.one/) | Dinova is an Italian cloud services provider that integrates Kamaji in its datacenters to offer fully managed Kubernetes clusters. |
| End-user | KINX | 2024 | [link](https://kinx.net/?lang=en) | KINX is an Internet infrastructure service provider and will use kamaji for its new [Managed Kubernetes Service](https://kinx.net/service/cloud/kubernetes/intro/?lang=en). |
| Vendor | Netsons | 2023 | [link](https://www.netsons.com) | Netsons is an Italian hosting and cloud provider and uses Kamaji in its [Managed Kubernetes](https://www.netsons.com/kubernetes) offering. |
| Vendor | NVIDIA | 2024 | [link](https://github.com/NVIDIA/doca-platform) | DOCA Platform Framework manages provisioning and service orchestration for NVIDIA Bluefield DPUs. |
| R&D | Orange | 2024 | [link](https://gitlab.com/Orange-OpenSource/kanod) | Orange is a French telecommunications company using Kamaji for experimental research purpose, with Kanod research solution. |
| Vendor | Platform9 | 2024 | [link](https://elasticmachinepool.com) | Platform9 uses Kamaji in its offering - Elastic Machine Pool, which is a tool for optimizing the cost of running kubernetes clusters in EKS. |
| Vendor | Qumulus | 2024 | [link](https://www.qumulus.io) | Qumulus is a cloud provider and plans to use Kamaji for it's hosted Kubernetes service |
@@ -18,8 +22,12 @@ Feel free to open a Pull-Request to get yours listed.
| End-user | Sicuro Tech Lab | 2024 | [link](https://sicurotechlab.it/) | Sicuro Tech Lab offers cloud infrastructure for Web Agencies and uses kamaji to provide managed k8s services. |
| Vendor | Sovereign Cloud Stack | 2024 | [link](https://sovereigncloudstack.org) | Sovereign Cloud Stack develops a standardized cloud platform and uses Kamaji in there Kubernetes-as-a-Service reference implementation |
| R&D | TIM | 2024 | [link](https://www.gruppotim.it) | TIM is an Italian telecommunications company using Kamaji for experimental research and development purposes. |
| End-user | Tinext Cloud | 2025 | [link](https://cloud.tinext.com) | Tinex Cloud is a Swiss cloud service provider using Kamaji to build their Managed Kubernetes Services. |
| Vendor | Ænix | 2023 | [link](https://aenix.io/) | Ænix provides consulting services for cloud providers and uses Kamaji for running Kubernetes-as-a-Service in free PaaS platform [Cozystack](https://cozystack.io). |

| End-user | Rackspace | 2024 | [link](https://spot.rackspace.com/) | Rackspace Spot uses Kamaji to manage our instances, offering fully-managed kubernetes infrastructure, auctioned in an open market. |
| R&D | IONOS Cloud | 2024 | [link](https://cloud.ionos.com/) | IONOS Cloud is a German Cloud Provider evaluating Kamaji for its [Managed Kubernetes platform](https://cloud.ionos.com/managed/kubernetes). |
| Vendor | OVHCloud | 2025 | [link](https://www.ovhcloud.com/) | OVHCloud is a European Cloud Provider that will use Kamaji for its Managed Kubernetes Service offer. |
| Vendor | WOBCOM GmbH | 2024 | [link](https://www.wobcom.de/) | WOBCOM provides an [**Open Digital Platform**](https://www.wobcom.de/geschaeftskunden/odp/) solution for Smart Cities, which is provided for customers in a Managed Kubernetes provided by Kamaji. |

### Adopter Types
40 Makefile
@@ -73,47 +73,47 @@ help: ## Display this help.
.PHONY: ko
ko: $(KO) ## Download ko locally if necessary.
$(KO): $(LOCALBIN)
	test -s $(LOCALBIN)/ko || GOBIN=$(LOCALBIN) go install github.com/google/ko@v0.14.1
	test -s $(LOCALBIN)/ko || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/google/ko@v0.14.1

.PHONY: yq
yq: $(YQ) ## Download yq locally if necessary.
$(YQ): $(LOCALBIN)
	test -s $(LOCALBIN)/yq || GOBIN=$(LOCALBIN) go install github.com/mikefarah/yq/v4@v4.44.2
	test -s $(LOCALBIN)/yq || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/mikefarah/yq/v4@v4.44.2

.PHONY: helm
helm: $(HELM) ## Download helm locally if necessary.
$(HELM): $(LOCALBIN)
	test -s $(LOCALBIN)/helm || GOBIN=$(LOCALBIN) go install helm.sh/helm/v3/cmd/helm@v3.9.0
	test -s $(LOCALBIN)/helm || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" helm.sh/helm/v3/cmd/helm@v3.9.0

.PHONY: ginkgo
ginkgo: $(GINKGO) ## Download ginkgo locally if necessary.
$(GINKGO): $(LOCALBIN)
	test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo
	test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/onsi/ginkgo/v2/ginkgo

.PHONY: kind
kind: $(KIND) ## Download kind locally if necessary.
$(KIND): $(LOCALBIN)
	test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) go install sigs.k8s.io/kind/cmd/kind@v0.14.0
	test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/kind/cmd/kind@v0.14.0

.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
	test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.1
	test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.1

.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
	test -s $(LOCALBIN)/golangci-lint || GOBIN=$(LOCALBIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2
	test -s $(LOCALBIN)/golangci-lint || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.0.2

.PHONY: apidocs-gen
apidocs-gen: $(APIDOCS_GEN) ## Download crdoc locally if necessary.
$(APIDOCS_GEN): $(LOCALBIN)
	test -s $(LOCALBIN)/crdoc || GOBIN=$(LOCALBIN) go install fybrik.io/crdoc@latest
	test -s $(LOCALBIN)/crdoc || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" fybrik.io/crdoc@latest

.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
	test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
	test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)

##@ Development

@@ -144,11 +144,10 @@ golint: golangci-lint ## Linting the code according to the styling guide.
## Run unit tests (all tests except E2E).
.PHONY: test
test: envtest ginkgo
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" $(GINKGO) -r -v --trace \
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" $(GINKGO) -r -v -coverprofile cover.out --trace \
		./api/... \
		./cmd/... \
		./internal/... \
		-coverprofile cover.out

_datastore-mysql:
	$(MAKE) NAME=$(NAME) -C deploy/kine/mysql mariadb
@@ -229,8 +228,8 @@ metallb:
	cat hack/metallb.yaml | sed -E "s|172.19|$$(docker network inspect -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}' kind | sed -E 's|^([0-9]+\.[0-9]+)\..*$$|\1|g')|g" | kubectl apply -f -

cert-manager:
	$(HELM) repo add bitnami https://charts.bitnami.com/bitnami
	$(HELM) upgrade --install cert-manager bitnami/cert-manager --namespace certmanager-system --create-namespace --set "installCRDs=true"
	$(HELM) repo add jetstack https://charts.jetstack.io
	$(HELM) upgrade --install cert-manager jetstack/cert-manager --namespace certmanager-system --create-namespace --set "installCRDs=true"

load: kind
	$(KIND) load docker-image --name kamaji ${CONTAINER_REPOSITORY}:${VERSION}
@@ -238,8 +237,8 @@ load: kind
##@ e2e

.PHONY: env
env:
	@make -C deploy/kind kind ingress-nginx
env: kind
	$(KIND) create cluster --name kamaji

.PHONY: e2e
e2e: env build load helm ginkgo cert-manager ## Create a KinD cluster, install Kamaji on it and run the test suite.
@@ -251,6 +250,15 @@ e2e: env build load helm ginkgo cert-manager ## Create a KinD cluster, install K

##@ Document

CAPI_URL = https://github.com/clastix/cluster-api-control-plane-provider-kamaji.git
CAPI_DIR := $(shell mktemp -d)
CRDS_DIR := $(shell mktemp -d)

.PHONY: apidoc
apidoc: apidocs-gen
	$(APIDOCS_GEN) crdoc --resources charts/kamaji/crds --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl
	@cp charts/kamaji/crds/*.yaml $(CRDS_DIR)
	@git clone $(CAPI_URL) $(CAPI_DIR)
	@cp $(CAPI_DIR)/config/crd/bases/*.yaml $(CRDS_DIR)
	@rm -rf $(CAPI_DIR)
	$(APIDOCS_GEN) crdoc --resources $(CRDS_DIR) --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl
	@rm -rf $(CRDS_DIR)
@@ -9,6 +9,7 @@ import (
)

//+kubebuilder:validation:Enum=etcd;MySQL;PostgreSQL;NATS
//+kubebuilder:validation:XValidation:rule="self == oldSelf",message="Datastore driver is immutable"

type Driver string

@@ -24,6 +25,13 @@ var (
type Endpoints []string

// DataStoreSpec defines the desired state of DataStore.
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.certificateAuthority.privateKey.secretReference) || has(self.tlsConfig.certificateAuthority.privateKey.content))) : true", message="certificateAuthority privateKey must have secretReference or content when driver is etcd"
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content))) : true", message="clientCertificate must have secretReference or content when driver is etcd"
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.privateKey.secretReference) || has(self.tlsConfig.clientCertificate.privateKey.content))) : true", message="clientCertificate privateKey must have secretReference or content when driver is etcd"
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.tlsConfig) && has(self.tlsConfig.clientCertificate)) ? (((has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content)))) : true", message="When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content"
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.basicAuth)) ? ((has(self.basicAuth.username.secretReference) || has(self.basicAuth.username.content))) : true", message="When driver is not etcd and basicAuth exists, username must have secretReference or content"
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.basicAuth)) ? ((has(self.basicAuth.password.secretReference) || has(self.basicAuth.password.content))) : true", message="When driver is not etcd and basicAuth exists, password must have secretReference or content"
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\") ? (has(self.tlsConfig) || has(self.basicAuth)) : true", message="When driver is not etcd, either tlsConfig or basicAuth must be provided"
type DataStoreSpec struct {
	// The driver to use to connect to the shared datastore.
	Driver Driver `json:"driver"`
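The CEL rules above gate what a DataStore object may look like per driver. As a rough illustration (not part of this diff), a PostgreSQL DataStore that satisfies the "either tlsConfig or basicAuth" rule could be sketched as follows; the apiVersion and the base64 encoding of the `content` fields are assumptions based on the surrounding v1alpha1 types, not something this compare view shows verbatim.

```yaml
# Hypothetical DataStore manifest sketched from the validation rules above.
# Assumes apiVersion kamaji.clastix.io/v1alpha1; `content` fields are assumed
# to carry base64-encoded bytes, mirroring the []byte fields in the Go types.
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: postgres-default
spec:
  driver: PostgreSQL
  endpoints:
    - pg-server:5432
  # Satisfies "When driver is not etcd, either tlsConfig or basicAuth must be provided".
  basicAuth:
    username:
      content: cG9zdGdyZXM=   # "postgres"
    password:
      content: cG9zdGdyZXM=   # "postgres"
```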
@@ -52,12 +52,14 @@ func (d *DatastoreUsedSecret) ExtractValue() client.IndexerFunc {
			res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef))
		}

		if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
			res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
		}
		if ds.Spec.TLSConfig.ClientCertificate != nil {
			if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
				res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
			}

			if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
				res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
			if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
				res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
			}
		}
	}
@@ -183,11 +183,12 @@ type KubernetesStatus struct {
	Ingress *KubernetesIngressStatus `json:"ingress,omitempty"`
}

// +kubebuilder:validation:Enum=Provisioning;CertificateAuthorityRotating;Upgrading;Migrating;Ready;NotReady
// +kubebuilder:validation:Enum=Provisioning;CertificateAuthorityRotating;Upgrading;Migrating;Ready;NotReady;Sleeping
type KubernetesVersionStatus string

var (
	VersionProvisioning KubernetesVersionStatus = "Provisioning"
	VersionSleeping     KubernetesVersionStatus = "Sleeping"
	VersionCARotating   KubernetesVersionStatus = "CertificateAuthorityRotating"
	VersionUpgrading    KubernetesVersionStatus = "Upgrading"
	VersionMigrating    KubernetesVersionStatus = "Migrating"
@@ -67,11 +67,12 @@ const (

type KubeletSpec struct {
	// Ordered list of the preferred NodeAddressTypes to use for kubelet connections.
	// Default to Hostname, InternalIP, ExternalIP.
	//+kubebuilder:default={"Hostname","InternalIP","ExternalIP"}
	// Default to InternalIP, ExternalIP, Hostname.
	//+kubebuilder:default={"InternalIP","ExternalIP","Hostname"}
	//+kubebuilder:validation:MinItems=1
	//+listType=set
	PreferredAddressTypes []KubeletPreferredAddressType `json:"preferredAddressTypes,omitempty"`
	// CGroupFS defines the cgroup driver for Kubelet
	// CGroupFS defines the cgroup driver for Kubelet
	// https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
	CGroupFS CGroupDriver `json:"cgroupfs,omitempty"`
}
@@ -304,6 +305,7 @@ type TenantControlPlaneSpec struct {
//+kubebuilder:subresource:scale:specpath=.spec.controlPlane.deployment.replicas,statuspath=.status.kubernetesResources.deployment.replicas,selectorpath=.status.kubernetesResources.deployment.selector
//+kubebuilder:resource:categories=kamaji,shortName=tcp
//+kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.kubernetes.version",description="Kubernetes version"
//+kubebuilder:printcolumn:name="Installed Version",type="string",JSONPath=".status.kubernetesResources.version.version",description="The actual installed Kubernetes version from status"
//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.kubernetesResources.version.status",description="Status"
//+kubebuilder:printcolumn:name="Control-Plane endpoint",type="string",JSONPath=".status.controlPlaneEndpoint",description="Tenant Control Plane Endpoint (API server)"
//+kubebuilder:printcolumn:name="Kubeconfig",type="string",JSONPath=".status.kubeconfig.admin.secretName",description="Secret which contains admin kubeconfig"
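The kubelet default ordering changes here from Hostname-first to InternalIP-first. A tenant that still needs the old lookup order can pin it explicitly in its TenantControlPlane; a minimal sketch, assuming the `spec.kubernetes.kubelet` path implied by the KubeletSpec type and the printcolumn JSONPaths above (the exact nesting is not spelled out in this diff):

```yaml
# Hypothetical TenantControlPlane fragment; field paths are inferred, not quoted from this diff.
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
spec:
  kubernetes:
    version: v1.30.0        # placeholder version
    kubelet:
      # Pin the pre-change ordering explicitly if Hostname-first lookups are still required.
      preferredAddressTypes:
        - Hostname
        - InternalIP
        - ExternalIP
```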
@@ -19,7 +19,7 @@ var _ = Describe("Cluster controller", func() {
	)

	BeforeEach(func() {
		ctx = context.Background() //nolint:fatcontext
		ctx = context.Background()
		tcp = &TenantControlPlane{
			ObjectMeta: metav1.ObjectMeta{
				Name: "tcp",
177 api/v1alpha1/validations_test.go (new file)
@@ -0,0 +1,177 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1alpha1

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var _ = Describe("Datastores validation test", func() {
	var (
		ctx context.Context
		ds  *DataStore
	)

	BeforeEach(func() {
		ctx = context.Background()
		ds = &DataStore{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ds",
				Namespace: "default",
			},
			Spec: DataStoreSpec{},
		}
	})

	AfterEach(func() {
		if err := k8sClient.Delete(ctx, ds); err != nil && !apierrors.IsNotFound(err) {
			Expect(err).NotTo(HaveOccurred())
		}
	})

	Context("DataStores fields", func() {
		It("datastores of type ETCD must have their TLS configurations set correctly", func() {
			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "bad-etcd",
				},
				Spec: DataStoreSpec{
					Driver:    "etcd",
					Endpoints: []string{"etcd-server:2379"},
					TLSConfig: &TLSConfig{
						CertificateAuthority: CertKeyPair{},
						ClientCertificate:    &ClientCertificate{},
					},
				},
			}

			err := k8sClient.Create(ctx, ds)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("certificateAuthority privateKey must have secretReference or content when driver is etcd"))
		})

		It("valid ETCD DataStore should be created", func() {
			var (
				cert = []byte("cert")
				key  = []byte("privkey")
			)

			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "good-etcd",
				},
				Spec: DataStoreSpec{
					Driver:    "etcd",
					Endpoints: []string{"etcd-server:2379"},
					TLSConfig: &TLSConfig{
						CertificateAuthority: CertKeyPair{
							Certificate: ContentRef{
								Content: cert,
							},
							PrivateKey: &ContentRef{
								Content: key,
							},
						},
						ClientCertificate: &ClientCertificate{
							Certificate: ContentRef{
								Content: cert,
							},
							PrivateKey: ContentRef{
								Content: key,
							},
						},
					},
				},
			}

			err := k8sClient.Create(ctx, ds)
			Expect(err).To(Not(HaveOccurred()))
		})

		It("datastores of type PostgreSQL must have either basicAuth or tlsConfig", func() {
			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "bad-pg",
				},
				Spec: DataStoreSpec{
					Driver:    "PostgreSQL",
					Endpoints: []string{"pg-server:5432"},
				},
			}

			err := k8sClient.Create(ctx, ds)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("When driver is not etcd, either tlsConfig or basicAuth must be provided"))
		})

		It("datastores of type PostgreSQL can have basicAuth", func() {
			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "good-pg",
				},
				Spec: DataStoreSpec{
					Driver:    "PostgreSQL",
					Endpoints: []string{"pg-server:5432"},
					BasicAuth: &BasicAuth{
						Username: ContentRef{
							Content: []byte("postgres"),
						},
						Password: ContentRef{
							Content: []byte("postgres"),
						},
					},
				},
			}

			err := k8sClient.Create(ctx, ds)
			Expect(err).To(Not(HaveOccurred()))
		})

		It("datastores of type PostgreSQL must have tlsConfig with proper content", func() {
			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "bad-pg",
				},
				Spec: DataStoreSpec{
					Driver:    "PostgreSQL",
					Endpoints: []string{"pg-server:5432"},
					TLSConfig: &TLSConfig{
						ClientCertificate: &ClientCertificate{},
					},
				},
			}

			err := k8sClient.Create(context.Background(), ds)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content"))
		})

		It("datastores of type PostgreSQL need a proper clientCertificate", func() {
			ds = &DataStore{
				ObjectMeta: metav1.ObjectMeta{
					Name: "good-pg",
				},
				Spec: DataStoreSpec{
					Driver:    "PostgreSQL",
					Endpoints: []string{"pg-server:5432"},
					TLSConfig: &TLSConfig{
						ClientCertificate: &ClientCertificate{
							Certificate: ContentRef{
								Content: []byte("cert"),
							},
						},
					},
				},
			}

			err := k8sClient.Create(context.Background(), ds)
			Expect(err).ToNot(HaveOccurred())
		})
	})
})
@@ -1110,6 +1110,11 @@ func (in *NetworkProfileSpec) DeepCopyInto(out *NetworkProfileSpec) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.LoadBalancerClass != nil {
		in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
		*out = new(string)
		**out = **in
	}
	if in.CertSANs != nil {
		in, out := &in.CertSANs, &out.CertSANs
		*out = make([]string, len(*in))
@@ -21,3 +21,8 @@
.idea/
*.tmproj
.vscode/
# Helm source files
README.md.gotmpl
.helmignore
# Build tools
Makefile
@@ -1,6 +1,6 @@
dependencies:
- name: kamaji-etcd
  repository: https://clastix.github.io/charts
  version: 0.8.1
digest: sha256:381d8ef9619c2daeea37e40c6a9772ae3e5cee80887148879db04e887d5364ad
generated: "2024-10-25T19:28:40.880766186+02:00"
  version: 0.11.0
digest: sha256:96b4115b8c02f771f809ec1bed3be3a3903e7e8315d6966aa54b0f73230ea421
generated: "2025-07-03T09:19:19.835421461+02:00"
@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v0.0.0
appVersion: latest
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
@@ -17,11 +17,11 @@ name: kamaji
sources:
  - https://github.com/clastix/kamaji
type: application
version: 0.0.0
version: 0.0.0+latest
dependencies:
  - name: kamaji-etcd
    repository: https://clastix.github.io/charts
    version: ">=0.8.1"
    version: ">=0.11.0"
    condition: kamaji-etcd.deploy
annotations:
  catalog.cattle.io/certified: partner
@@ -46,4 +46,5 @@ annotations:
  artifacthub.io/operator: "true"
  artifacthub.io/operatorCapabilities: "full lifecycle"
  artifacthub.io/changes: |
    - Using dependency chart `kamaji-etcd` as a default DataStore.
    - kind: added
      description: Releasing latest chart at every push
@@ -1,6 +1,6 @@
# kamaji

(Version / AppVersion badge images)

Kamaji is the Hosted Control Plane Manager for Kubernetes.

@@ -22,7 +22,7 @@ Kubernetes: `>=1.21.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://clastix.github.io/charts | kamaji-etcd | >=0.8.1 |
| https://clastix.github.io/charts | kamaji-etcd | >=0.11.0 |

[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
This Helm Chart starting from v0.1.1 provides the installation of an internal `etcd` in order to streamline the local test. If you'd like to use an externally managed etcd instance, you can specify the overrides and by setting the value `etcd.deploy=false`.
@@ -31,9 +31,13 @@ This Helm Chart starting from v0.1.1 provides the installation of an internal `e

## Install Kamaji

To add clastix helm repository:

    helm repo add clastix https://clastix.github.io/charts

To install the Chart with the release name `kamaji`:

    helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
    helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji

Show the status:

@@ -78,10 +82,7 @@ Here the values you can override:
| image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
| image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | |
| kamaji-etcd.datastore.enabled | bool | `true` | |
| kamaji-etcd.datastore.name | string | `"default"` | |
| kamaji-etcd.deploy | bool | `true` | |
| kamaji-etcd.fullnameOverride | string | `"kamaji-etcd"` | |
| kamaji-etcd | object | `{"clusterDomain":"cluster.local","datastore":{"enabled":true,"name":"default"},"deploy":true,"fullnameOverride":"kamaji-etcd"}` | Subchart: See https://github.com/clastix/kamaji-etcd/blob/master/charts/kamaji-etcd/values.yaml |
| livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"healthcheck"},"initialDelaySeconds":15,"periodSeconds":20}` | The livenessProbe for the controller container |
| loggingDevel.enable | bool | `false` | Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |
| metricsBindAddress | string | `":8080"` | The address the metric endpoint binds to. (default ":8080") |
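Related to the kamaji-etcd values listed above, a site that brings its own datastore would typically disable the bundled subchart. A hedged values override sketch (value names taken from the table above; whether further datastore settings are needed depends on the external setup):

```yaml
# Hypothetical values.yaml override: skip deploying the bundled kamaji-etcd subchart.
kamaji-etcd:
  deploy: false
```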
@@ -18,10 +18,15 @@ This Helm Chart starting from v0.1.1 provides the installation of an internal `e

## Install Kamaji

To add clastix helm repository:

    helm repo add clastix https://clastix.github.io/charts

To install the Chart with the release name `kamaji`:

    helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
    helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji

Show the status:

@@ -1,12 +0,0 @@
# Kamaji

Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden.

Useful links:
- [Kamaji Github repository](https://github.com/clastix/kamaji)
- [Kamaji Documentation](https://kamaji.clastix.io)

## Requirements

* Kubernetes v1.22+
* Helm v3
@@ -120,6 +120,9 @@ spec:
            - PostgreSQL
            - NATS
            type: string
            x-kubernetes-validations:
            - message: Datastore driver is immutable
              rule: self == oldSelf
          endpoints:
            description: |-
              List of the endpoints to connect to the shared datastore.
@@ -263,6 +266,21 @@ spec:
          - driver
          - endpoints
          type: object
          x-kubernetes-validations:
          - message: certificateAuthority privateKey must have secretReference or content when driver is etcd
            rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.certificateAuthority.privateKey.secretReference) || has(self.tlsConfig.certificateAuthority.privateKey.content))) : true'
          - message: clientCertificate must have secretReference or content when driver is etcd
            rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content))) : true'
          - message: clientCertificate privateKey must have secretReference or content when driver is etcd
            rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.privateKey.secretReference) || has(self.tlsConfig.clientCertificate.privateKey.content))) : true'
          - message: When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content
            rule: '(self.driver != "etcd" && has(self.tlsConfig) && has(self.tlsConfig.clientCertificate)) ? (((has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content)))) : true'
          - message: When driver is not etcd and basicAuth exists, username must have secretReference or content
            rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.username.secretReference) || has(self.basicAuth.username.content))) : true'
          - message: When driver is not etcd and basicAuth exists, password must have secretReference or content
            rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.password.secretReference) || has(self.basicAuth.password.content))) : true'
          - message: When driver is not etcd, either tlsConfig or basicAuth must be provided
            rule: '(self.driver != "etcd") ? (has(self.tlsConfig) || has(self.basicAuth)) : true'
        status:
          description: DataStoreStatus defines the observed state of DataStore.
          properties:
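For the etcd driver, the same rules require both the certificate-authority private key and a full client certificate. A hedged manifest sketch, mirroring the placeholder values used in validations_test.go above (real deployments would carry PEM material, and the `content` fields are assumed to be base64-encoded):

```yaml
# Hypothetical etcd DataStore satisfying the etcd-specific rules above.
apiVersion: kamaji.clastix.io/v1alpha1   # assumed group/version
kind: DataStore
metadata:
  name: etcd-default
spec:
  driver: etcd
  endpoints:
    - etcd-server:2379
  tlsConfig:
    certificateAuthority:
      certificate:
        content: Y2VydA==      # "cert" (placeholder, not a real PEM)
      privateKey:
        content: cHJpdmtleQ==  # "privkey" (placeholder)
    clientCertificate:
      certificate:
        content: Y2VydA==
      privateKey:
        content: cHJpdmtleQ==
```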
@@ -23,6 +23,10 @@ spec:
      jsonPath: .spec.kubernetes.version
      name: Version
      type: string
    - description: The actual installed Kubernetes version from status
      jsonPath: .status.kubernetesResources.version.version
      name: Installed Version
      type: string
    - description: Status
      jsonPath: .status.kubernetesResources.version.status
      name: Status
@@ -427,7 +431,7 @@ spec:
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -447,7 +451,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
description: Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -497,7 +501,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -512,7 +516,7 @@ spec:
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -559,7 +563,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
sleep:
|
||||
description: Sleep represents the duration that the container should sleep before being terminated.
|
||||
description: Sleep represents a duration that the container should sleep.
|
||||
properties:
|
||||
seconds:
|
||||
description: Seconds is the number of seconds to sleep.
|
||||
@@ -571,8 +575,8 @@ spec:
|
||||
tcpSocket:
|
||||
description: |-
|
||||
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
|
||||
for the backward compatibility. There are no validation of this field and
|
||||
lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
for backward compatibility. There is no validation of this field and
|
||||
lifecycle hooks will fail at runtime when it is specified.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -603,7 +607,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -618,7 +622,7 @@ spec:
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -665,7 +669,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
sleep:
|
||||
description: Sleep represents the duration that the container should sleep before being terminated.
|
||||
description: Sleep represents a duration that the container should sleep.
|
||||
properties:
|
||||
seconds:
|
||||
description: Seconds is the number of seconds to sleep.
|
||||
@@ -677,8 +681,8 @@ spec:
|
||||
tcpSocket:
|
||||
description: |-
|
||||
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
|
||||
for the backward compatibility. There are no validation of this field and
|
||||
lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
for backward compatibility. There is no validation of this field and
|
||||
lifecycle hooks will fail at runtime when it is specified.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -696,6 +700,12 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -705,7 +715,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -726,7 +736,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -744,7 +754,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -809,7 +819,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -911,7 +921,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -932,7 +942,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -950,7 +960,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -1015,7 +1025,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -1354,7 +1364,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -1375,7 +1385,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -1393,7 +1403,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -1458,7 +1468,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -1792,7 +1802,7 @@ spec:
|
||||
Values defined by an Env with a duplicate key will take precedence.
|
||||
Cannot be updated.
|
||||
items:
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps
|
||||
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
properties:
|
||||
configMapRef:
|
||||
description: The ConfigMap to select from
|
||||
@@ -1812,7 +1822,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
prefix:
|
||||
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
|
||||
description: Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
secretRef:
|
||||
description: The Secret to select from
|
||||
@@ -1862,7 +1872,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -1877,7 +1887,7 @@ spec:
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -1924,7 +1934,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
sleep:
|
||||
description: Sleep represents the duration that the container should sleep before being terminated.
|
||||
description: Sleep represents a duration that the container should sleep.
|
||||
properties:
|
||||
seconds:
|
||||
description: Seconds is the number of seconds to sleep.
|
||||
@@ -1936,8 +1946,8 @@ spec:
|
||||
tcpSocket:
|
||||
description: |-
|
||||
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
|
||||
for the backward compatibility. There are no validation of this field and
|
||||
lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
for backward compatibility. There is no validation of this field and
|
||||
lifecycle hooks will fail at runtime when it is specified.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -1968,7 +1978,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -1983,7 +1993,7 @@ spec:
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -2030,7 +2040,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
sleep:
|
||||
description: Sleep represents the duration that the container should sleep before being terminated.
|
||||
description: Sleep represents a duration that the container should sleep.
|
||||
properties:
|
||||
seconds:
|
||||
description: Seconds is the number of seconds to sleep.
|
||||
@@ -2042,8 +2052,8 @@ spec:
|
||||
tcpSocket:
|
||||
description: |-
|
||||
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
|
||||
for the backward compatibility. There are no validation of this field and
|
||||
lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
for backward compatibility. There is no validation of this field and
|
||||
lifecycle hooks will fail at runtime when it is specified.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -2061,6 +2071,12 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
type: object
|
||||
stopSignal:
|
||||
description: |-
|
||||
StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
If not specified, the default is defined by the container runtime in use.
|
||||
StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
type: string
|
||||
type: object
|
||||
livenessProbe:
|
||||
description: |-
|
||||
@@ -2070,7 +2086,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -2091,7 +2107,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -2109,7 +2125,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -2174,7 +2190,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -2276,7 +2292,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -2297,7 +2313,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -2315,7 +2331,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -2380,7 +2396,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -2719,7 +2735,7 @@ spec:
|
||||
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
properties:
|
||||
exec:
|
||||
description: Exec specifies the action to take.
|
||||
description: Exec specifies a command to execute in the container.
|
||||
properties:
|
||||
command:
|
||||
description: |-
|
||||
@@ -2740,7 +2756,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
grpc:
|
||||
description: GRPC specifies an action involving a GRPC port.
|
||||
description: GRPC specifies a GRPC HealthCheckRequest.
|
||||
properties:
|
||||
port:
|
||||
description: Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
@@ -2758,7 +2774,7 @@ spec:
|
||||
- port
|
||||
type: object
|
||||
httpGet:
|
||||
description: HTTPGet specifies the http request to perform.
|
||||
description: HTTPGet specifies an HTTP GET request to perform.
|
||||
properties:
|
||||
host:
|
||||
description: |-
|
||||
@@ -2823,7 +2839,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
tcpSocket:
|
||||
description: TCPSocket specifies an action involving a TCP port.
|
||||
description: TCPSocket specifies a connection to a TCP port.
|
||||
properties:
|
||||
host:
|
||||
description: 'Optional: Host name to connect to, defaults to the pod IP.'
|
||||
@@ -3214,6 +3230,8 @@ spec:
|
||||
description: |-
|
||||
awsElasticBlockStore represents an AWS Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
|
||||
awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
|
||||
properties:
|
||||
fsType:
|
||||
@@ -3245,7 +3263,10 @@ spec:
|
||||
- volumeID
|
||||
type: object
|
||||
azureDisk:
|
||||
description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
|
||||
description: |-
|
||||
azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
|
||||
Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
|
||||
are redirected to the disk.csi.azure.com CSI driver.
|
||||
properties:
|
||||
cachingMode:
|
||||
description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.'
|
||||
@@ -3277,7 +3298,10 @@ spec:
|
||||
- diskURI
|
||||
type: object
|
||||
azureFile:
|
||||
description: azureFile represents an Azure File Service mount on the host and bind mount to the pod.
|
||||
description: |-
|
||||
azureFile represents an Azure File Service mount on the host and bind mount to the pod.
|
||||
Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
|
||||
are redirected to the file.csi.azure.com CSI driver.
|
||||
properties:
|
||||
readOnly:
|
||||
description: |-
|
||||
@@ -3295,7 +3319,9 @@ spec:
|
||||
- shareName
|
||||
type: object
|
||||
cephfs:
|
||||
description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
|
||||
description: |-
|
||||
cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
|
||||
Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
|
||||
properties:
|
||||
monitors:
|
||||
description: |-
|
||||
@@ -3346,6 +3372,8 @@ spec:
|
||||
cinder:
|
||||
description: |-
|
||||
cinder represents a cinder volume attached and mounted on kubelets host machine.
|
||||
Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
|
||||
are redirected to the cinder.csi.openstack.org CSI driver.
|
||||
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
@@ -3452,7 +3480,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
csi:
|
||||
description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
|
||||
description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
|
||||
properties:
|
||||
driver:
|
||||
description: |-
|
||||
@@ -3894,6 +3922,7 @@ spec:
|
||||
description: |-
|
||||
flexVolume represents a generic volume resource that is
|
||||
provisioned/attached using an exec based plugin.
|
||||
Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
|
||||
properties:
|
||||
driver:
|
||||
description: driver is the name of the driver to use for this volume.
|
||||
@@ -3937,7 +3966,9 @@ spec:
|
||||
- driver
|
||||
type: object
|
||||
flocker:
|
||||
description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
|
||||
description: |-
|
||||
flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
|
||||
Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
|
||||
properties:
|
||||
datasetName:
|
||||
description: |-
|
||||
@@ -3952,6 +3983,8 @@ spec:
|
||||
description: |-
|
||||
gcePersistentDisk represents a GCE Disk resource that is attached to a
|
||||
kubelet's host machine and then exposed to the pod.
|
||||
Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
|
||||
gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
|
||||
properties:
|
||||
fsType:
|
||||
@@ -3987,7 +4020,7 @@ spec:
|
||||
gitRepo:
|
||||
description: |-
|
||||
gitRepo represents a git repository at a particular revision.
|
||||
DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
|
||||
Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
|
||||
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
|
||||
into the Pod's container.
|
||||
properties:
|
||||
@@ -4010,6 +4043,7 @@ spec:
|
||||
glusterfs:
|
||||
description: |-
|
||||
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/glusterfs/README.md
|
||||
properties:
|
||||
endpoints:
|
||||
@@ -4069,7 +4103,7 @@ spec:
|
||||
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
properties:
|
||||
pullPolicy:
|
||||
@@ -4216,7 +4250,9 @@ spec:
|
||||
- claimName
|
||||
type: object
|
||||
photonPersistentDisk:
|
||||
description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
|
||||
description: |-
|
||||
photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
|
||||
Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -4231,7 +4267,11 @@ spec:
|
||||
- pdID
|
||||
type: object
|
||||
portworxVolume:
|
||||
description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine
|
||||
description: |-
|
||||
portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
|
||||
Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
|
||||
are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
|
||||
is on.
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -4566,7 +4606,9 @@ spec:
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
quobyte:
|
||||
description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime
|
||||
description: |-
|
||||
quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
|
||||
properties:
|
||||
group:
|
||||
description: |-
|
||||
@@ -4604,6 +4646,7 @@ spec:
|
||||
rbd:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
@@ -4676,7 +4719,9 @@ spec:
|
||||
- monitors
|
||||
type: object
|
||||
scaleIO:
|
||||
description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
|
||||
description: |-
|
||||
scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
|
||||
Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
|
||||
properties:
|
||||
fsType:
|
||||
default: xfs
|
||||
@@ -4802,7 +4847,9 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
storageos:
|
||||
description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
|
||||
description: |-
|
||||
storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
|
||||
Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -4847,7 +4894,10 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
vsphereVolume:
|
||||
description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
|
||||
description: |-
|
||||
vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
|
||||
Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
|
||||
are redirected to the csi.vsphere.vmware.com CSI driver.
|
||||
properties:
|
||||
fsType:
|
||||
description: |-
|
||||
@@ -5139,7 +5189,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5154,7 +5203,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5315,7 +5363,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5330,7 +5377,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5484,7 +5530,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5499,7 +5544,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5660,7 +5704,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -5675,7 +5718,6 @@ spec:
|
||||
pod labels will be ignored. The default value is empty.
|
||||
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -6305,7 +6347,6 @@ spec:
|
||||
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
nodeTaintsPolicy:
|
||||
description: |-
|
||||
@@ -6316,7 +6357,6 @@ spec:
|
||||
- Ignore: node taints are ignored. All nodes are included.
|
||||
|
||||
If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
type: string
|
||||
topologyKey:
|
||||
description: |-
|
||||
@@ -6499,7 +6539,7 @@ spec:
          properties:
            cgroupfs:
              description: |-
                CGroupFS defines the cgroup driver for Kubelet
                CGroupFS defines the cgroup driver for Kubelet
                https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
              enum:
              - systemd
@@ -6507,12 +6547,12 @@ spec:
              type: string
            preferredAddressTypes:
              default:
              - Hostname
              - InternalIP
              - ExternalIP
              - Hostname
              description: |-
                Ordered list of the preferred NodeAddressTypes to use for kubelet connections.
                Default to Hostname, InternalIP, ExternalIP.
                Default to InternalIP, ExternalIP, Hostname.
              items:
                enum:
                - Hostname
@@ -6523,6 +6563,7 @@ spec:
                type: string
              minItems: 1
              type: array
              x-kubernetes-list-type: set
          type: object
        version:
          description: Kubernetes Version for the tenant control plane
|
||||
@@ -6802,6 +6843,7 @@ spec:
|
||||
Ports is a list of records of service ports
|
||||
If used, every port defined in the service should have an entry in it
|
||||
items:
|
||||
description: PortStatus represents the error condition of a service port
|
||||
properties:
|
||||
error:
|
||||
description: |-
|
||||
@@ -7036,7 +7078,7 @@ spec:
|
||||
description: KubernetesDeploymentStatus defines the status for the Tenant Control Plane Deployment in the management cluster.
|
||||
properties:
|
||||
availableReplicas:
|
||||
description: Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
|
||||
description: Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
|
||||
format: int32
|
||||
type: integer
|
||||
collisionCount:
|
||||
@@ -7094,16 +7136,24 @@ spec:
|
||||
format: int64
|
||||
type: integer
|
||||
readyReplicas:
|
||||
description: readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
|
||||
description: Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
|
||||
format: int32
|
||||
type: integer
|
||||
replicas:
|
||||
description: Total number of non-terminated pods targeted by this deployment (their labels match the selector).
|
||||
description: Total number of non-terminating pods targeted by this deployment (their labels match the selector).
|
||||
format: int32
|
||||
type: integer
|
||||
selector:
|
||||
description: Selector is the label selector used to group the Tenant Control Plane Pods used by the scale subresource.
|
||||
type: string
|
||||
terminatingReplicas:
|
||||
description: |-
|
||||
Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
|
||||
.metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
|
||||
|
||||
This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
|
||||
format: int32
|
||||
type: integer
|
||||
unavailableReplicas:
|
||||
description: |-
|
||||
Total number of unavailable pods targeted by this deployment. This is the total number of
|
||||
@@ -7112,7 +7162,7 @@ spec:
|
||||
format: int32
|
||||
type: integer
|
||||
updatedReplicas:
|
||||
description: Total number of non-terminated pods targeted by this deployment that have the desired template spec.
|
||||
description: Total number of non-terminating pods targeted by this deployment that have the desired template spec.
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
@@ -7283,6 +7333,7 @@ spec:
|
||||
Ports is a list of records of service ports
|
||||
If used, every port defined in the service should have an entry in it
|
||||
items:
|
||||
description: PortStatus represents the error condition of a service port
|
||||
properties:
|
||||
error:
|
||||
description: |-
|
||||
@@ -7343,6 +7394,7 @@ spec:
              - Migrating
              - Ready
              - NotReady
              - Sleeping
              type: string
            version:
              description: Version is the running Kubernetes version of the Tenant Control Plane.
|
||||
|
||||
@@ -19,10 +19,6 @@ spec:
      labels:
        {{- include "kamaji.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      serviceAccountName: {{ include "kamaji.serviceAccountName" . }}
|
||||
|
||||
@@ -9,6 +9,10 @@ metadata:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  namespace: {{ .Release.Namespace }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
  {{- toYaml . | nindent 2 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
|
||||
|
||||
@@ -98,9 +98,12 @@ loggingDevel:
# -- If specified, all the Kamaji instances with an unassigned DataStore will inherit this default value.
defaultDatastoreName: default

# -- Subchart: See https://github.com/clastix/kamaji-etcd/blob/master/charts/kamaji-etcd/values.yaml
kamaji-etcd:
  deploy: true
  fullnameOverride: kamaji-etcd
  ## -- Important, this must match your management cluster's clusterDomain, otherwise the init jobs will fail
  clusterDomain: "cluster.local"
  datastore:
    enabled: true
    name: default
@@ -108,4 +111,4 @@ kamaji-etcd:
  # -- Disable the analytics traces collection
  telemetry:
    disabled: false
|
||||
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -136,7 +137,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
				return err
			}

			tcpChannel, certChannel := make(controllers.TenantControlPlaneChannel), make(controllers.CertificateChannel)
			tcpChannel, certChannel := make(chan event.GenericEvent), make(chan event.GenericEvent)

			if err = (&controllers.DataStore{Client: mgr.GetClient(), TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
				setupLog.Error(err, "unable to create controller", "controller", "DataStore")
@@ -219,9 +220,9 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
					},
				},
				routes.TenantControlPlaneValidate{}: {
					handlers.TenantControlPlaneCertSANs{},
					handlers.TenantControlPlaneName{},
					handlers.TenantControlPlaneVersion{},
					handlers.TenantControlPlaneKubeletAddresses{},
					handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
					handlers.TenantControlPlaneDeployment{
						Client: mgr.GetClient(),
|
||||
|
||||
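Both channels are now plain chan event.GenericEvent values shared between the producing and the consuming controllers. A minimal sketch of why the named aliases could be dropped without any further change (struct names here are illustrative, the field names are the ones used in this diff):

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
)

// A named type such as TenantControlPlaneChannel was only an alias for a
// channel of generic events, so producer and consumer can simply share the
// underlying channel type directly.
type producer struct {
	TenantControlPlaneTrigger chan event.GenericEvent
}

type consumer struct {
	TriggerChan chan event.GenericEvent
}

func wire() (producer, consumer) {
	ch := make(chan event.GenericEvent)

	return producer{TenantControlPlaneTrigger: ch}, consumer{TriggerChan: ch}
}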
@@ -22,9 +22,10 @@ import (
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
	// CLI flags
	var (
		tenantControlPlane string
		targetDataStore    string
		timeout            time.Duration
		tenantControlPlane    string
		targetDataStore       string
		cleanupPriorMigration bool
		timeout               time.Duration
	)

	cmd := &cobra.Command{
@@ -95,6 +96,20 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
				return err
			}
			defer targetConnection.Close()

			if cleanupPriorMigration {
				log.Info("Checking if target DataStore should be clean-up prior migration")

				if exists, _ := targetConnection.DBExists(ctx, tcp.Status.Storage.Setup.Schema); exists {
					log.Info("A colliding schema on target DataStore is present, cleaning up")

					if dErr := targetConnection.DeleteDB(ctx, tcp.Status.Storage.Setup.Schema); dErr != nil {
						return fmt.Errorf("error cleaning up prior migration: %s", dErr.Error())
					}

					log.Info("Cleaning up prior migration has been completed")
				}
			}
			// Start migrating from the old Datastore to the new one
			log.Info("migration from origin to target started")

@@ -110,6 +125,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {

	cmd.Flags().StringVar(&tenantControlPlane, "tenant-control-plane", "", "Namespaced-name of the TenantControlPlane that must be migrated (e.g.: default/test)")
	cmd.Flags().StringVar(&targetDataStore, "target-datastore", "", "Name of the Datastore to which the TenantControlPlane will be migrated")
	cmd.Flags().BoolVar(&cleanupPriorMigration, "cleanup-prior-migration", false, "When set to true, migration job will drop existing data in the target DataStore: useful to avoid stale data when migrating back and forth between DataStores.")
	cmd.Flags().DurationVar(&timeout, "timeout", 5*time.Minute, "Amount of time for the context timeout")

	_ = cmd.MarkFlagRequired("tenant-control-plane")
|
||||
|
||||
@@ -1,9 +1,9 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: k8s-130
  name: k8s-133
  labels:
    tenant.clastix.io: k8s-130
    tenant.clastix.io: k8s-133
spec:
  controlPlane:
    deployment:
@@ -11,7 +11,7 @@ spec:
    service:
      serviceType: LoadBalancer
  kubernetes:
    version: "v1.30.0"
    version: "v1.33.0"
    kubelet:
      cgroupfs: systemd
  networkProfile:
|
||||
|
||||
@@ -1,10 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package controllers

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
)

type CertificateChannel chan event.GenericEvent
|
||||
@@ -30,7 +30,7 @@ import (
)

type CertificateLifecycle struct {
	Channel  CertificateChannel
	Channel  chan event.GenericEvent
	Deadline time.Duration

	client client.Client
@@ -99,7 +99,7 @@ func (s *CertificateLifecycle) Reconcile(ctx context.Context, request reconcile.

	logger.Info("certificate is still valid, enqueuing back", "after", after.String())

	return reconcile.Result{Requeue: true, RequeueAfter: after}, nil
	return reconcile.Result{RequeueAfter: after}, nil
}

func (s *CertificateLifecycle) extractCertificateFromBareSecret(secret corev1.Secret) (*x509.Certificate, error) {
|
||||
|
||||
@@ -6,10 +6,12 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
@@ -21,62 +23,71 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/controllers/utils"
|
||||
)
|
||||
|
||||
type DataStore struct {
|
||||
Client client.Client
|
||||
// TenantControlPlaneTrigger is the channel used to communicate across the controllers:
|
||||
// if a Data Source is updated we have to be sure that the reconciliation of the certificates content
|
||||
// if a Data Source is updated, we have to be sure that the reconciliation of the certificates content
|
||||
// for each Tenant Control Plane is put in place properly.
|
||||
TenantControlPlaneTrigger TenantControlPlaneChannel
|
||||
TenantControlPlaneTrigger chan event.GenericEvent
|
||||
}
|
||||
|
||||
//+kubebuilder:rbac:groups=kamaji.clastix.io,resources=datastores,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=kamaji.clastix.io,resources=datastores/status,verbs=get;update;patch
|
||||
|
||||
func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
log := log.FromContext(ctx)
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
ds := &kamajiv1alpha1.DataStore{}
|
||||
err := r.Client.Get(ctx, request.NamespacedName, ds)
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("resource have been deleted, skipping")
|
||||
var ds kamajiv1alpha1.DataStore
|
||||
if err := r.Client.Get(ctx, request.NamespacedName, &ds); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
logger.Info("resource have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Error(err, "cannot retrieve the required resource")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
logger.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
tcpList := kamajiv1alpha1.TenantControlPlaneList{}
|
||||
var tcpList kamajiv1alpha1.TenantControlPlaneList
|
||||
|
||||
if err := r.Client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
|
||||
}); err != nil {
|
||||
log.Error(err, "cannot retrieve list of the Tenant Control Plane using the following instance")
|
||||
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
if lErr := r.Client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
|
||||
}); lErr != nil {
|
||||
return errors.Wrap(lErr, "cannot retrieve list of the Tenant Control Plane using the following instance")
|
||||
}
|
||||
// Updating the status with the list of Tenant Control Plane using the following Data Source
|
||||
tcpSets := sets.NewString()
|
||||
for _, tcp := range tcpList.Items {
|
||||
tcpSets.Insert(getNamespacedName(tcp.GetNamespace(), tcp.GetName()).String())
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Updating the status with the list of Tenant Control Plane using the following Data Source
|
||||
tcpSets := sets.NewString()
|
||||
for _, tcp := range tcpList.Items {
|
||||
tcpSets.Insert(getNamespacedName(tcp.GetNamespace(), tcp.GetName()).String())
|
||||
}
|
||||
ds.Status.UsedBy = tcpSets.List()
|
||||
|
||||
ds.Status.UsedBy = tcpSets.List()
|
||||
if sErr := r.Client.Status().Update(ctx, &ds); sErr != nil {
|
||||
return errors.Wrap(sErr, "cannot update the status for the given instance")
|
||||
}
|
||||
|
||||
if err := r.Client.Status().Update(ctx, ds); err != nil {
|
||||
log.Error(err, "cannot update the status for the given instance")
|
||||
return nil
|
||||
})
|
||||
if updateErr != nil {
|
||||
logger.Error(updateErr, "cannot update DataStore status")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
return reconcile.Result{}, updateErr
|
||||
}
|
||||
// Triggering the reconciliation of the Tenant Control Plane upon a Secret change
|
||||
for _, i := range tcpList.Items {
|
||||
tcp := i
|
||||
for _, tcp := range tcpList.Items {
|
||||
var shrunkTCP kamajiv1alpha1.TenantControlPlane
|
||||
|
||||
r.TenantControlPlaneTrigger <- event.GenericEvent{Object: &tcp}
|
||||
shrunkTCP.Name = tcp.Name
|
||||
shrunkTCP.Namespace = tcp.Namespace
|
||||
|
||||
go utils.TriggerChannel(ctx, r.TenantControlPlaneTrigger, shrunkTCP)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
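The rewritten reconciler wraps the whole list-and-update of the DataStore status in retry.RetryOnConflict, so a stale resourceVersion simply triggers another attempt instead of failing the reconciliation. A minimal sketch of that pattern in isolation (the helper name is illustrative, not part of the codebase):

package example

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

// updateUsedBy retries the status update on conflict: every attempt re-reads
// the DataStore so the write is always based on the latest resourceVersion.
func updateUsedBy(ctx context.Context, c client.Client, ds *kamajiv1alpha1.DataStore, usedBy []string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		if err := c.Get(ctx, client.ObjectKeyFromObject(ds), ds); err != nil {
			return err
		}

		ds.Status.UsedBy = usedBy

		return c.Status().Update(ctx, ds)
	})
}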
|
||||
@@ -95,7 +106,7 @@ func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
//nolint:forcetypeassert
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(
|
||||
predicate.ResourceVersionChangedPredicate{},
|
||||
predicate.GenerationChangedPredicate{},
|
||||
)).
|
||||
Watches(&kamajiv1alpha1.TenantControlPlane{}, handler.Funcs{
|
||||
CreateFunc: func(_ context.Context, createEvent event.TypedCreateEvent[client.Object], w workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
|
||||
@@ -30,8 +30,7 @@ import (
|
||||
)
|
||||
|
||||
type CoreDNS struct {
|
||||
logger logr.Logger
|
||||
|
||||
Logger logr.Logger
|
||||
AdminClient client.Client
|
||||
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
|
||||
TriggerChannel chan event.GenericEvent
|
||||
@@ -40,43 +39,40 @@ type CoreDNS struct {
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := c.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
c.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
c.Logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
c.logger.Info("start processing")
|
||||
c.Logger.Info("start processing")
|
||||
|
||||
resource := &addons.CoreDNS{Client: c.AdminClient}
|
||||
|
||||
result, handlingErr := resources.Handle(ctx, resource, tcp)
|
||||
if handlingErr != nil {
|
||||
c.logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
c.Logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
|
||||
return reconcile.Result{}, handlingErr
|
||||
}
|
||||
|
||||
if result == controllerutil.OperationResultNone {
|
||||
c.logger.Info("reconciliation completed")
|
||||
c.Logger.Info("reconciliation completed")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
if err = utils.UpdateStatus(ctx, c.AdminClient, tcp, resource); err != nil {
|
||||
c.logger.Error(err, "update status failed", "resource", resource.GetName())
|
||||
c.Logger.Error(err, "update status failed", "resource", resource.GetName())
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
c.logger.Info("reconciliation processed")
|
||||
c.Logger.Info("reconciliation processed")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (c *CoreDNS) SetupWithManager(mgr manager.Manager) error {
|
||||
c.logger = mgr.GetLogger().WithName("coredns")
|
||||
c.TriggerChannel = make(chan event.GenericEvent)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
|
||||
@@ -31,8 +31,7 @@ import (
|
||||
)
|
||||
|
||||
type KonnectivityAgent struct {
|
||||
logger logr.Logger
|
||||
|
||||
Logger logr.Logger
|
||||
AdminClient client.Client
|
||||
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
|
||||
TriggerChannel chan event.GenericEvent
|
||||
@@ -41,43 +40,40 @@ type KonnectivityAgent struct {
|
||||
func (k *KonnectivityAgent) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := k.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
k.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
k.Logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
for _, resource := range controllers.GetExternalKonnectivityResources(k.AdminClient) {
|
||||
k.logger.Info("start processing", "resource", resource.GetName())
|
||||
k.Logger.Info("start processing", "resource", resource.GetName())
|
||||
|
||||
result, handlingErr := resources.Handle(ctx, resource, tcp)
|
||||
if handlingErr != nil {
|
||||
k.logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
k.Logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
|
||||
return reconcile.Result{}, handlingErr
|
||||
}
|
||||
|
||||
if result == controllerutil.OperationResultNone {
|
||||
k.logger.Info("resource processed", "resource", resource.GetName())
|
||||
k.Logger.Info("resource processed", "resource", resource.GetName())
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
|
||||
k.logger.Error(err, "update status failed", "resource", resource.GetName())
|
||||
k.Logger.Error(err, "update status failed", "resource", resource.GetName())
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
k.logger.Info("reconciliation completed")
|
||||
k.Logger.Info("reconciliation completed")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
k.logger = mgr.GetLogger().WithName("konnectivity_agent")
|
||||
k.TriggerChannel = make(chan event.GenericEvent)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
|
||||
For(&appsv1.DaemonSet{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
|
||||
@@ -65,7 +65,6 @@ func (k *KubeadmPhase) Reconcile(ctx context.Context, _ reconcile.Request) (reco
|
||||
|
||||
func (k *KubeadmPhase) SetupWithManager(mgr manager.Manager) error {
|
||||
k.logger = mgr.GetLogger().WithName(k.Phase.GetName())
|
||||
k.TriggerChannel = make(chan event.GenericEvent)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
|
||||
|
||||
@@ -30,53 +30,49 @@ import (
|
||||
)
|
||||
|
||||
type KubeProxy struct {
|
||||
Logger logr.Logger
|
||||
AdminClient client.Client
|
||||
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
|
||||
TriggerChannel chan event.GenericEvent
|
||||
|
||||
logger logr.Logger
|
||||
}
|
||||
|
||||
func (k *KubeProxy) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := k.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
k.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
k.Logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
k.logger.Info("start processing")
|
||||
k.Logger.Info("start processing")
|
||||
|
||||
resource := &addons.KubeProxy{Client: k.AdminClient}
|
||||
|
||||
result, handlingErr := resources.Handle(ctx, resource, tcp)
|
||||
if handlingErr != nil {
|
||||
k.logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
k.Logger.Error(handlingErr, "resource process failed", "resource", resource.GetName())
|
||||
|
||||
return reconcile.Result{}, handlingErr
|
||||
}
|
||||
|
||||
if result == controllerutil.OperationResultNone {
|
||||
k.logger.Info("reconciliation completed")
|
||||
k.Logger.Info("reconciliation completed")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
|
||||
k.logger.Error(err, "update status failed")
|
||||
k.Logger.Error(err, "update status failed")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
k.logger.Info("reconciliation processed")
|
||||
k.Logger.Info("reconciliation processed")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (k *KubeProxy) SetupWithManager(mgr manager.Manager) error {
|
||||
k.logger = mgr.GetLogger().WithName("kube_proxy")
|
||||
k.TriggerChannel = make(chan event.GenericEvent)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
|
||||
@@ -6,6 +6,7 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
@@ -29,9 +30,8 @@ import (
|
||||
)
|
||||
|
||||
type Migrate struct {
|
||||
client client.Client
|
||||
logger logr.Logger
|
||||
|
||||
Client client.Client
|
||||
Logger logr.Logger
|
||||
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
|
||||
WebhookNamespace string
|
||||
WebhookServiceName string
|
||||
@@ -46,7 +46,7 @@ func (m *Migrate) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile
|
||||
}
|
||||
// Cannot detect the status of the TenantControlPlane, enqueuing back
|
||||
if tcp.Status.Kubernetes.Version.Status == nil {
|
||||
return reconcile.Result{Requeue: true}, nil
|
||||
return reconcile.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
switch *tcp.Status.Kubernetes.Version.Status {
|
||||
@@ -57,7 +57,7 @@ func (m *Migrate) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
m.logger.Error(err, "reconciliation failed")
|
||||
m.Logger.Error(err, "reconciliation failed")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -66,7 +66,7 @@ func (m *Migrate) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile
|
||||
}
|
||||
|
||||
func (m *Migrate) cleanup(ctx context.Context) error {
|
||||
if err := m.client.Delete(ctx, m.object()); err != nil {
|
||||
if err := m.Client.Delete(ctx, m.object()); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (m *Migrate) cleanup(ctx context.Context) error {
|
||||
func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
obj := m.object()
|
||||
|
||||
_, err := utilities.CreateOrUpdateWithConflict(ctx, m.client, obj, func() error {
|
||||
_, err := utilities.CreateOrUpdateWithConflict(ctx, m.Client, obj, func() error {
|
||||
obj.Webhooks = []admissionregistrationv1.ValidatingWebhook{
|
||||
{
|
||||
Name: "leases.migrate.kamaji.clastix.io",
|
||||
@@ -178,8 +178,6 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (m *Migrate) SetupWithManager(mgr manager.Manager) error {
|
||||
m.client = mgr.GetClient()
|
||||
m.logger = mgr.GetLogger().WithName("migrate")
|
||||
m.TriggerChannel = make(chan event.GenericEvent)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
|
||||
@@ -6,8 +6,9 @@ package soot
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
@@ -34,14 +35,19 @@ import (
|
||||
)
|
||||
|
||||
type sootItem struct {
|
||||
triggers []chan event.GenericEvent
|
||||
cancelFn context.CancelFunc
|
||||
triggers []chan event.GenericEvent
|
||||
cancelFn context.CancelFunc
|
||||
completedCh chan struct{}
|
||||
}
|
||||
|
||||
type sootMap map[string]sootItem
|
||||
|
||||
const (
|
||||
sootManagerAnnotation = "kamaji.clastix.io/soot"
|
||||
sootManagerFailedAnnotation = "failed"
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
client client.Client
|
||||
sootMap sootMap
|
||||
// sootManagerErrChan is the channel that is going to be used
|
||||
// when the soot manager cannot start due to any kind of problem.
|
||||
@@ -59,7 +65,7 @@ func (m *Manager) retrieveTenantControlPlane(ctx context.Context, request reconc
|
||||
return func() (*kamajiv1alpha1.TenantControlPlane, error) {
|
||||
tcp := &kamajiv1alpha1.TenantControlPlane{}
|
||||
|
||||
if err := m.client.Get(ctx, request.NamespacedName, tcp); err != nil {
|
||||
if err := m.AdminClient.Get(ctx, request.NamespacedName, tcp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -93,39 +99,82 @@ func (m *Manager) cleanup(ctx context.Context, req reconcile.Request, tenantCont
|
||||
}
|
||||
|
||||
v.cancelFn()
|
||||
// TODO(prometherion): the 10 seconds is an hardcoded number,
|
||||
// it's widely used across the code base as a timeout with the API Server.
|
||||
// Evaluate if we would need to make this configurable globally.
|
||||
deadlineCtx, deadlineFn := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer deadlineFn()
|
||||
|
||||
select {
|
||||
case _, open := <-v.completedCh:
|
||||
if !open {
|
||||
log.FromContext(ctx).Info("soot manager completed its process")
|
||||
|
||||
break
|
||||
}
|
||||
case <-deadlineCtx.Done():
|
||||
log.FromContext(ctx).Error(deadlineCtx.Err(), "soot manager didn't exit to timeout")
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
delete(m.sootMap, tcpName)
|
||||
|
||||
return nil
|
||||
}
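The cleanup path now waits for the soot manager goroutine to signal completion by closing a channel, but never longer than a fixed deadline. A self-contained sketch of the same wait-or-timeout idiom (names are illustrative):

package example

import (
	"context"
	"time"
)

// waitOrTimeout blocks until done is closed by the goroutine being stopped,
// or until the timeout expires, and reports whether the goroutine finished.
func waitOrTimeout(ctx context.Context, done <-chan struct{}, timeout time.Duration) bool {
	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	select {
	case <-done:
		// Channel closed: the goroutine has completed its shutdown.
		return true
	case <-deadlineCtx.Done():
		// Deadline reached before the goroutine exited.
		return false
	}
}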
|
||||
|
||||
func (m *Manager) retryTenantControlPlaneAnnotations(ctx context.Context, request reconcile.Request, modifierFn func(annotations map[string]string)) error {
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
tcp, err := m.retrieveTenantControlPlane(ctx, request)()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tcp.Annotations == nil {
|
||||
tcp.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
modifierFn(tcp.Annotations)
|
||||
|
||||
tcp.SetAnnotations(tcp.Annotations)
|
||||
|
||||
return m.AdminClient.Update(ctx, tcp)
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:maintidx
|
||||
func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
|
||||
// Retrieving the TenantControlPlane:
|
||||
// in case of deletion, we must be sure to properly remove from the memory the soot manager.
|
||||
tcp := &kamajiv1alpha1.TenantControlPlane{}
|
||||
if err = m.client.Get(ctx, request.NamespacedName, tcp); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
if err = m.AdminClient.Get(ctx, request.NamespacedName, tcp); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, m.cleanup(ctx, request, nil)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Handling finalizer if the TenantControlPlane is marked for deletion:
|
||||
tcpStatus := ptr.Deref(tcp.Status.Kubernetes.Version.Status, kamajiv1alpha1.VersionProvisioning)
|
||||
// Handling finalizer if the TenantControlPlane is marked for deletion or scaled to zero:
|
||||
// the clean-up function is already taking care to stop the manager, if this exists.
|
||||
if tcp.GetDeletionTimestamp() != nil {
|
||||
if tcp.GetDeletionTimestamp() != nil || tcpStatus == kamajiv1alpha1.VersionSleeping {
|
||||
if controllerutil.ContainsFinalizer(tcp, finalizers.SootFinalizer) {
|
||||
return reconcile.Result{}, m.cleanup(ctx, request, tcp)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
tcpStatus := *tcp.Status.Kubernetes.Version.Status
|
||||
// Triggering the reconciliation of the underlying controllers of
|
||||
// the soot manager if this is already registered.
|
||||
v, ok := m.sootMap[request.String()]
|
||||
if ok {
|
||||
switch {
|
||||
case tcp.Annotations != nil && tcp.Annotations[sootManagerAnnotation] == sootManagerFailedAnnotation:
|
||||
delete(m.sootMap, request.String())
|
||||
|
||||
return reconcile.Result{}, m.retryTenantControlPlaneAnnotations(ctx, request, func(annotations map[string]string) {
|
||||
delete(annotations, sootManagerAnnotation)
|
||||
})
|
||||
case tcpStatus == kamajiv1alpha1.VersionCARotating:
|
||||
// The TenantControlPlane CA has been rotated, it means the running manager
|
||||
// must be restarted to avoid certificate signed by unknown authority errors.
|
||||
@@ -137,7 +186,12 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
return reconcile.Result{}, m.cleanup(ctx, request, tcp)
|
||||
default:
|
||||
for _, trigger := range v.triggers {
|
||||
trigger <- event.GenericEvent{Object: tcp}
|
||||
var shrunkTCP kamajiv1alpha1.TenantControlPlane
|
||||
|
||||
shrunkTCP.Name = tcp.Name
|
||||
shrunkTCP.Namespace = tcp.Namespace
|
||||
|
||||
go utils.TriggerChannel(ctx, trigger, shrunkTCP)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,7 +199,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
}
|
||||
// No need to start a soot manager if the TenantControlPlane is not ready:
|
||||
// enqueuing back is not required since we're going to get that event once ready.
|
||||
if tcpStatus == kamajiv1alpha1.VersionNotReady || tcpStatus == kamajiv1alpha1.VersionCARotating {
|
||||
if tcpStatus == kamajiv1alpha1.VersionNotReady || tcpStatus == kamajiv1alpha1.VersionCARotating || tcpStatus == kamajiv1alpha1.VersionSleeping {
|
||||
log.FromContext(ctx).Info("skipping start of the soot manager for a not ready instance")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
@@ -159,11 +213,11 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
return nil
|
||||
})
|
||||
|
||||
return reconcile.Result{Requeue: true}, finalizerErr
|
||||
return reconcile.Result{RequeueAfter: time.Second}, finalizerErr
|
||||
}
|
||||
// Generating the manager and starting it:
|
||||
// in case of any error, reconciling the request to start it back from the beginning.
|
||||
tcpRest, err := utilities.GetRESTClientConfig(ctx, m.client, tcp)
|
||||
tcpRest, err := utilities.GetRESTClientConfig(ctx, m.AdminClient, tcp)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -178,14 +232,14 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
|
||||
mgr, err := controllerruntime.NewManager(tcpRest, controllerruntime.Options{
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
Scheme: m.AdminClient.Scheme(),
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: "0",
|
||||
},
|
||||
NewClient: func(config *rest.Config, _ client.Options) (client.Client, error) {
|
||||
return client.New(config, client.Options{
|
||||
Scheme: m.client.Scheme(),
|
||||
})
|
||||
NewClient: func(config *rest.Config, opts client.Options) (client.Client, error) {
|
||||
opts.Scheme = m.AdminClient.Scheme()
|
||||
|
||||
return client.New(config, opts)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
@@ -199,6 +253,8 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
WebhookServiceName: m.MigrateServiceName,
|
||||
WebhookCABundle: m.MigrateCABundle,
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Client: mgr.GetClient(),
|
||||
Logger: mgr.GetLogger().WithName("migrate"),
|
||||
}
|
||||
if err = migrate.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -207,6 +263,8 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
konnectivityAgent := &controllers.KonnectivityAgent{
|
||||
AdminClient: m.AdminClient,
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Logger: mgr.GetLogger().WithName("konnectivity_agent"),
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = konnectivityAgent.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -215,6 +273,8 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
kubeProxy := &controllers.KubeProxy{
|
||||
AdminClient: m.AdminClient,
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Logger: mgr.GetLogger().WithName("kube_proxy"),
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = kubeProxy.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -223,6 +283,8 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
coreDNS := &controllers.CoreDNS{
|
||||
AdminClient: m.AdminClient,
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Logger: mgr.GetLogger().WithName("coredns"),
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = coreDNS.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -234,6 +296,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseUploadConfigKubeadm,
|
||||
},
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = uploadKubeadmConfig.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -245,6 +308,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseUploadConfigKubelet,
|
||||
},
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = uploadKubeletConfig.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -256,6 +320,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseBootstrapToken,
|
||||
},
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = bootstrapToken.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -267,19 +332,35 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseClusterAdminRBAC,
|
||||
},
|
||||
TriggerChannel: make(chan event.GenericEvent),
|
||||
}
|
||||
if err = kubeadmRbac.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
completedCh := make(chan struct{})
|
||||
// Starting the manager
|
||||
go func() {
|
||||
if err = mgr.Start(tcpCtx); err != nil {
|
||||
log.FromContext(ctx).Error(err, "unable to start soot manager")
|
||||
// The sootManagerAnnotation is used to propagate the error between reconciliations with its state:
|
||||
// this is required to avoid mutex and prevent concurrent read/write on the soot map
|
||||
annotationErr := m.retryTenantControlPlaneAnnotations(ctx, request, func(annotations map[string]string) {
|
||||
annotations[sootManagerAnnotation] = sootManagerFailedAnnotation
|
||||
})
|
||||
if annotationErr != nil {
|
||||
log.FromContext(ctx).Error(err, "unable to update TenantControlPlane for soot failed annotation")
|
||||
}
|
||||
// When the manager cannot start we're enqueuing back the request to take advantage of the backoff factor
|
||||
// of the queue: this is a goroutine and cannot return an error since the manager is running on its own,
|
||||
// using the sootManagerErrChan channel we can trigger a reconciliation although the TCP hadn't any change.
|
||||
m.sootManagerErrChan <- event.GenericEvent{Object: tcp}
|
||||
var shrunkTCP kamajiv1alpha1.TenantControlPlane
|
||||
|
||||
shrunkTCP.Name = tcp.Name
|
||||
shrunkTCP.Namespace = tcp.Namespace
|
||||
|
||||
m.sootManagerErrChan <- event.GenericEvent{Object: &shrunkTCP}
|
||||
}
|
||||
close(completedCh)
|
||||
}()
|
||||
|
||||
m.sootMap[request.NamespacedName.String()] = sootItem{
|
||||
@@ -292,14 +373,14 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
uploadKubeletConfig.TriggerChannel,
|
||||
bootstrapToken.TriggerChannel,
|
||||
},
|
||||
cancelFn: tcpCancelFn,
|
||||
cancelFn: tcpCancelFn,
|
||||
completedCh: completedCh,
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, nil
|
||||
return reconcile.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
func (m *Manager) SetupWithManager(mgr manager.Manager) error {
|
||||
m.client = mgr.GetClient()
|
||||
m.sootManagerErrChan = make(chan event.GenericEvent)
|
||||
m.sootMap = make(map[string]sootItem)
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package controllers

import "sigs.k8s.io/controller-runtime/pkg/event"

type TenantControlPlaneChannel chan event.GenericEvent
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
@@ -81,7 +82,7 @@ func (m *TelemetryController) collectStats(ctx context.Context, uid string) {
|
||||
|
||||
for _, tcp := range tcpList.Items {
|
||||
switch {
|
||||
case tcp.Spec.ControlPlane.Deployment.Replicas == nil || *tcp.Spec.ControlPlane.Deployment.Replicas == 0:
|
||||
case ptr.Deref(tcp.Status.Kubernetes.Version.Status, kamajiv1alpha1.VersionProvisioning) == kamajiv1alpha1.VersionSleeping:
|
||||
stats.TenantControlPlanes.Sleeping++
|
||||
case tcp.Status.Kubernetes.Version.Status != nil && *tcp.Status.Kubernetes.Version.Status == kamajiv1alpha1.VersionNotReady:
|
||||
stats.TenantControlPlanes.NotReady++
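The telemetry collector now relies on ptr.Deref to read the possibly-nil version status with a default, instead of a dedicated nil/zero-replica branch. A small sketch of the idiom (the function name is illustrative):

package example

import (
	"k8s.io/utils/ptr"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

// isSleeping treats a nil status pointer as "provisioning", so the comparison
// against the Sleeping phase is safe without an explicit nil check.
func isSleeping(tcp kamajiv1alpha1.TenantControlPlane) bool {
	return ptr.Deref(tcp.Status.Kubernetes.Version.Status, kamajiv1alpha1.VersionProvisioning) == kamajiv1alpha1.VersionSleeping
}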
|
||||
|
||||
@@ -44,7 +44,7 @@ type TenantControlPlaneReconciler struct {
|
||||
Client client.Client
|
||||
APIReader client.Reader
|
||||
Config TenantControlPlaneReconcilerConfig
|
||||
TriggerChan TenantControlPlaneChannel
|
||||
TriggerChan chan event.GenericEvent
|
||||
KamajiNamespace string
|
||||
KamajiServiceAccount string
|
||||
KamajiService string
|
||||
@@ -53,7 +53,7 @@ type TenantControlPlaneReconciler struct {
|
||||
// CertificateChan is the channel used by the CertificateLifecycleController that is checking for
|
||||
// certificates and kubeconfig user certs validity: a generic event for the given TCP will be triggered
|
||||
// once the validity threshold for the given certificate is reached.
|
||||
CertificateChan CertificateChannel
|
||||
CertificateChan chan event.GenericEvent
|
||||
|
||||
clock mutex.Clock
|
||||
}
|
||||
@@ -101,11 +101,11 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
case errors.As(err, &mutex.ErrTimeout):
|
||||
log.Info("acquire timed out, current process is blocked by another reconciliation")
|
||||
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
case errors.As(err, &mutex.ErrCancelled):
|
||||
log.Info("acquire cancelled")
|
||||
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
default:
|
||||
log.Error(err, "acquire failed")
|
||||
|
||||
@@ -125,7 +125,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
if errors.Is(err, ErrMissingDataStore) {
|
||||
log.Info(err.Error())
|
||||
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "cannot retrieve the DataStore for the given instance")
|
||||
@@ -186,7 +186,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
if kamajierrors.ShouldReconcileErrorBeIgnored(err) {
|
||||
log.V(1).Info("sentinel error, enqueuing back request", "error", err.Error())
|
||||
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "handling of resource failed", "resource", resource.GetName())
|
||||
@@ -199,6 +199,12 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
}
|
||||
|
||||
if err = utils.UpdateStatus(ctx, r.Client, tenantControlPlane, resource); err != nil {
|
||||
if kamajierrors.ShouldReconcileErrorBeIgnored(err) {
|
||||
log.V(1).Info("sentinel error, enqueuing back request", "error", err.Error())
|
||||
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "update of the resource failed", "resource", resource.GetName())
|
||||
|
||||
return ctrl.Result{}, err
|
||||
@@ -209,7 +215,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
if result == resources.OperationResultEnqueueBack {
|
||||
log.Info("requested enqueuing back", "resources", resource.GetName())
|
||||
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
26
controllers/utils/trigger_channel.go
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
|
||||
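// TriggerChannel sends a GenericEvent for the given TenantControlPlane on the receiver channel,
// giving up and logging an error if the send cannot complete within ten seconds.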
func TriggerChannel(ctx context.Context, receiver chan event.GenericEvent, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
deadlineCtx, cancelFn := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancelFn()
|
||||
|
||||
select {
|
||||
case receiver <- event.GenericEvent{Object: &tcp}:
|
||||
return
|
||||
case <-deadlineCtx.Done():
|
||||
log.FromContext(ctx).Error(deadlineCtx.Err(), "cannot send due to timeout")
|
||||
}
|
||||
}
|
||||
@@ -104,7 +104,9 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
- apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
|
||||
33
deploy/kamaji-aws.env
Normal file
@@ -0,0 +1,33 @@
|
||||
# aws parameters
|
||||
export KAMAJI_REGION=eu-west-3
|
||||
export KAMAJI_AZ=eu-west-3a
|
||||
export KAMAJI_CLUSTER_VERSION="1.32"
|
||||
export KAMAJI_CLUSTER=kamaji-2
|
||||
export KAMAJI_NODE_NG=${KAMAJI_CLUSTER}-${KAMAJI_REGION}-ng1
|
||||
export KAMAJI_NODE_TYPE=t3.medium
|
||||
export KAMAJI_VPC_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/VPC
|
||||
export KAMAJI_VPC_CIDR=192.168.0.0/16
|
||||
export KAMAJI_PUBLIC_SUBNET_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/SubnetPublicEUWEST3A
|
||||
export KAMAJI_PRIVATE_SUBNET_NAME=eksctl-${KAMAJI_CLUSTER}-cluster/SubnetPrivateEUWEST3A
|
||||
|
||||
|
||||
# kamaji parameters
|
||||
export KAMAJI_NAMESPACE=kamaji-system
|
||||
|
||||
# tenant cluster parameters
|
||||
export TENANT_NAMESPACE=tenant-00
|
||||
export TENANT_NAME=tenant-00
|
||||
export TENANT_DOMAIN=internal.kamaji.aws.com
|
||||
export TENANT_VERSION=v1.31.0
|
||||
export TENANT_PORT=6443 # port used to expose the tenant api server
|
||||
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
|
||||
export TENANT_POD_CIDR=10.36.0.0/16
|
||||
export TENANT_SVC_CIDR=10.96.0.0/16
|
||||
export TENANT_DNS_SERVICE=10.96.0.10
|
||||
|
||||
export TENANT_VM_SIZE=t3.medium
|
||||
export TENANT_ASG_MIN_SIZE=1
|
||||
export TENANT_ASG_MAX_SIZE=1
|
||||
export TENANT_ASG_DESIRED_SIZE=1
|
||||
export TENANT_SUBNET_ADDRESS=10.0.4.0/24
|
||||
export TENANT_ASG_NAME=$TENANT_NAME-workers
|
||||
@@ -15,7 +15,7 @@ export KAMAJI_NAMESPACE=kamaji-system
|
||||
export TENANT_NAMESPACE=default
|
||||
export TENANT_NAME=tenant-00
|
||||
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
|
||||
export TENANT_VERSION=v1.26.0
|
||||
export TENANT_VERSION=v1.31.0
|
||||
export TENANT_PORT=6443 # port used to expose the tenant api server
|
||||
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
|
||||
export TENANT_POD_CIDR=10.36.0.0/16
|
||||
|
||||
@@ -5,7 +5,7 @@ export KAMAJI_NAMESPACE=kamaji-system
|
||||
export TENANT_NAMESPACE=default
|
||||
export TENANT_NAME=tenant-00
|
||||
export TENANT_DOMAIN=clastix.labs
|
||||
export TENANT_VERSION=v1.26.0
|
||||
export TENANT_VERSION=v1.31.0
|
||||
export TENANT_PORT=6443 # port used to expose the tenant api server
|
||||
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
|
||||
export TENANT_POD_CIDR=10.36.0.0/16
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
kind_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
|
||||
|
||||
include ../etcd/Makefile
|
||||
|
||||
.PHONY: kind ingress-nginx
|
||||
|
||||
.DEFAULT_GOAL := kamaji
|
||||
|
||||
prometheus-stack:
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo update
|
||||
helm install prometheus-stack --create-namespace -n monitoring prometheus-community/kube-prometheus-stack
|
||||
|
||||
reqs: kind ingress-nginx cert-manager
|
||||
|
||||
cert-manager:
|
||||
@kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml
|
||||
|
||||
kamaji: reqs
|
||||
helm install kamaji --create-namespace -n kamaji-system $(kind_path)/../../charts/kamaji
|
||||
|
||||
destroy: kind/destroy etcd-certificates/cleanup
|
||||
|
||||
kind:
|
||||
@kind create cluster --config $(kind_path)/kind-kamaji.yaml
|
||||
|
||||
kind/destroy:
|
||||
@kind delete cluster --name kamaji
|
||||
|
||||
ingress-nginx: ingress-nginx-install
|
||||
|
||||
ingress-nginx-install:
|
||||
kubectl apply -f $(kind_path)/nginx-deploy.yaml
|
||||
|
||||
kamaji-kind-worker-join:
|
||||
$(kind_path)/join-node.bash
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
# Constants
|
||||
export DOCKER_IMAGE_NAME="kindest/node"
|
||||
export DOCKER_NETWORK="kind"
|
||||
|
||||
# Variables
|
||||
export KUBERNETES_VERSION=${1:-v1.23.4}
|
||||
export KUBECONFIG="${KUBECONFIG:-/tmp/kubeconfig}"
|
||||
|
||||
if [ -z $2 ]
|
||||
then
|
||||
MAPPING_PORT=""
|
||||
else
|
||||
MAPPING_PORT="-p ${2}:80"
|
||||
fi
|
||||
|
||||
clear
|
||||
echo "Welcome to join a new node to the Kind network"
|
||||
|
||||
echo -ne "\nChecking right kubeconfig\n"
|
||||
kubectl cluster-info
|
||||
echo "Are you pointing to the right tenant control plane? (Type return to continue)"
|
||||
read
|
||||
|
||||
JOIN_CMD="$(kubeadm --kubeconfig=${KUBECONFIG} token create --print-join-command) --ignore-preflight-errors=SystemVerification"
|
||||
echo "Deploying new node..."
|
||||
NODE=$(docker run -d --privileged -v /lib/modules:/lib/modules:ro -v /var --net $DOCKER_NETWORK $MAPPING_PORT $DOCKER_IMAGE_NAME:$KUBERNETES_VERSION)
|
||||
sleep 10
|
||||
echo "Joining new node..."
|
||||
docker exec -e JOIN_CMD="$JOIN_CMD" $NODE /bin/bash -c "$JOIN_CMD"
|
||||
|
||||
echo "Node has joined! Remember to install the kind-net CNI by issuing the following command:"
|
||||
echo " $: kubectl apply -f https://raw.githubusercontent.com/aojea/kindnet/master/install-kindnet.yaml"
|
||||
@@ -1,37 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
name: kamaji
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: kindest/node:v1.23.4
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "ingress-ready=true"
|
||||
## required for Cluster API local development
|
||||
extraMounts:
|
||||
- hostPath: /var/run/docker.sock
|
||||
containerPath: /var/run/docker.sock
|
||||
extraPortMappings:
|
||||
## expose port 80 of the node to port 80 on the host
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
protocol: TCP
|
||||
## expose port 443 of the node to port 443 on the host
|
||||
- containerPort: 443
|
||||
hostPort: 443
|
||||
protocol: TCP
|
||||
## expose port 31132 of the node to port 31132 on the host for konnectivity
|
||||
- containerPort: 31132
|
||||
hostPort: 31132
|
||||
protocol: TCP
|
||||
## expose port 31443 of the node to port 31443 on the host
|
||||
- containerPort: 31443
|
||||
hostPort: 31443
|
||||
protocol: TCP
|
||||
## expose port 6443 of the node to port 8443 on the host
|
||||
- containerPort: 6443
|
||||
hostPort: 8443
|
||||
protocol: TCP
|
||||
@@ -1,694 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
automountServiceAccountToken: true
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
data:
|
||||
allow-snippet-annotations: 'true'
|
||||
---
|
||||
# Source: ingress-nginx/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
- nodes
|
||||
- pods
|
||||
- secrets
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
# Source: ingress-nginx/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
- secrets
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
resourceNames:
|
||||
- ingress-controller-leader
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-rolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-service-webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller-admission
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: https-webhook
|
||||
port: 443
|
||||
targetPort: webhook
|
||||
appProtocol: https
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-service.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
type: NodePort
|
||||
ipFamilyPolicy: SingleStack
|
||||
ipFamilies:
|
||||
- IPv4
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
appProtocol: http
|
||||
- name: https
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
appProtocol: https
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
revisionHistoryLimit: 10
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
minReadySeconds: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
spec:
|
||||
dnsPolicy: ClusterFirst
|
||||
containers:
|
||||
- name: controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0@sha256:f766669fdcf3dc26347ed273a55e754b427eb4411ee075a53f30718b4499076a
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --election-id=ingress-controller-leader
|
||||
- --controller-class=k8s.io/ingress-nginx
|
||||
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
|
||||
- --validating-webhook=:8443
|
||||
- --validating-webhook-certificate=/usr/local/certificates/cert
|
||||
- --validating-webhook-key=/usr/local/certificates/key
|
||||
- --watch-ingress-without-class=true
|
||||
- --publish-status-address=localhost
|
||||
- --enable-ssl-passthrough=true
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 101
|
||||
allowPrivilegeEscalation: true
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: LD_PRELOAD
|
||||
value: /usr/local/lib/libmimalloc.so
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
hostPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
hostPort: 443
|
||||
- name: webhook
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: webhook-cert
|
||||
mountPath: /usr/local/certificates/
|
||||
readOnly: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
nodeSelector:
|
||||
ingress-ready: 'true'
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
operator: Equal
|
||||
serviceAccountName: ingress-nginx
|
||||
terminationGracePeriodSeconds: 0
|
||||
volumes:
|
||||
- name: webhook-cert
|
||||
secret:
|
||||
secretName: ingress-nginx-admission
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-ingressclass.yaml
|
||||
# We don't support namespaced ingressClass yet
|
||||
# So a ClusterRole and a ClusterRoleBinding is required
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: IngressClass
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: controller
|
||||
name: nginx
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
|
||||
# before changing this value, check the required kubernetes version
|
||||
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
name: ingress-nginx-admission
|
||||
webhooks:
|
||||
- name: validate.nginx.ingress.kubernetes.io
|
||||
matchPolicy: Equivalent
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
failurePolicy: Fail
|
||||
sideEffects: None
|
||||
admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: ingress-nginx
|
||||
name: ingress-nginx-controller-admission
|
||||
path: /networking/v1/ingresses
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
rules:
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- validatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx-admission
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx-admission
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ingress-nginx-admission-create
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: ingress-nginx-admission-create
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
containers:
|
||||
- name: create
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- create
|
||||
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
|
||||
- --namespace=$(POD_NAMESPACE)
|
||||
- --secret-name=ingress-nginx-admission
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: ingress-nginx-admission
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2000
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ingress-nginx-admission-patch
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
helm.sh/hook: post-install,post-upgrade
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: ingress-nginx-admission-patch
|
||||
labels:
|
||||
helm.sh/chart: ingress-nginx-4.0.10
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/version: 1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
containers:
|
||||
- name: patch
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- patch
|
||||
- --webhook-name=ingress-nginx-admission
|
||||
- --namespace=$(POD_NAMESPACE)
|
||||
- --patch-mutating=false
|
||||
- --secret-name=ingress-nginx-admission
|
||||
- --patch-failure-policy=Fail
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: ingress-nginx-admission
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2000
|
||||
128
docs/content/cluster-api/cluster-autoscaler.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# Cluster Autoscaler
|
||||
|
||||
The [Cluster Autoscaler](https://github.com/kubernetes/autoscaler) is a tool that automatically adjusts the size of a Kubernetes cluster so that all pods have a place to run and there are no unneeded nodes.
|
||||
|
||||
When pods are unschedulable because there are not enough resources, Cluster Autoscaler scales up the cluster. When nodes are underutilized, Cluster Autoscaler scales down the cluster.
|
||||
|
||||
Cluster API supports the Cluster Autoscaler. See the [Cluster Autoscaler on Cluster API](https://cluster-api.sigs.k8s.io/tasks/automated-machine-management/autoscaling) documentation for more information.
|
||||
|
||||
## Getting started with the Cluster Autoscaler on Kamaji
|
||||
|
||||
Kamaji supports the Cluster Autoscaler through Cluster API. There are several ways to run the Cluster Autoscaler with Cluster API. In this guide, we leverage the unique features of Kamaji to run the Cluster Autoscaler as part of the Hosted Control Plane.
|
||||
|
||||
In other words, the Cluster Autoscaler runs as a pod in the Kamaji Management Cluster, side by side with the Tenant Control Plane pods, and connects directly to the API server of the workload cluster, hiding sensitive data and information from the tenant: this is achieved by mounting the kubeconfig of the tenant cluster in the Cluster Autoscaler pod.
|
||||
|
||||
### Create the workload cluster
|
||||
|
||||
Create a workload cluster using the Kamaji Control Plane Provider and the Infrastructure Provider of choice. The following example creates a workload cluster using the vSphere Infrastructure Provider:
|
||||
|
||||
The template file [`capi-kamaji-vsphere-autoscaler-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-autoscaler-template.yaml) provides a full example of a cluster with autoscaler enabled. You can generate the cluster manifest using `clusterctl`.
|
||||
|
||||
First, list all the variables in the template file:
|
||||
|
||||
```bash
|
||||
cat capi-kamaji-vsphere-autoscaler-template.yaml | clusterctl generate yaml --list-variables
|
||||
```
|
||||
|
||||
Fill them with the desired values and generate the manifest:
|
||||
|
||||
```bash
|
||||
clusterctl generate yaml \
|
||||
--from capi-kamaji-vsphere-autoscaler-template.yaml \
|
||||
> capi-kamaji-vsphere-cluster.yaml
|
||||
```
|
||||
|
||||
Apply the generated manifest to create the workload cluster:
|
||||
|
||||
```bash
|
||||
kubectl apply -f capi-kamaji-vsphere-cluster.yaml
|
||||
```
|
||||
|
||||
### Install the Cluster Autoscaler
|
||||
|
||||
Install the Cluster Autoscaler via Helm in the Management Cluster, in the same namespace where the workload cluster is deployed.
|
||||
|
||||
!!! info "Options for install Cluster Autoscaler"
|
||||
Cluster Autoscaler works on a single cluster: every cluster must have its own Cluster Autoscaler instance. This can be automated by leveraging Project Sveltos, deploying a Cluster Autoscaler instance for each Kamaji Cluster API instance.
|
||||
|
||||
```bash
|
||||
helm repo add autoscaler https://kubernetes.github.io/autoscaler
|
||||
helm repo update
|
||||
helm upgrade --install ${CLUSTER_NAME}-autoscaler autoscaler/cluster-autoscaler \
|
||||
--set cloudProvider=clusterapi \
|
||||
--set autoDiscovery.namespace=default \
|
||||
--set "autoDiscovery.labels[0].autoscaling=enabled" \
|
||||
--set clusterAPIKubeconfigSecret=${CLUSTER_NAME}-kubeconfig \
|
||||
--set clusterAPIMode=kubeconfig-incluster
|
||||
```
|
||||
|
||||
The `autoDiscovery.labels` values are used to dynamically pick the clusters to autoscale.
|
||||
|
||||
Such labels must be set on the workload cluster, in the `Cluster` and `MachineDeployment` resources.
|
||||
|
||||
```yaml
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/cluster-name: sample
|
||||
# Cluster Autoscaler labels
|
||||
autoscaling: enabled
|
||||
name: sample
|
||||
|
||||
# other fields omitted for brevity
|
||||
---
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: MachineDeployment
|
||||
metadata:
|
||||
annotations:
|
||||
# Cluster Autoscaler annotations
|
||||
cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
|
||||
cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "6"
|
||||
labels:
|
||||
cluster.x-k8s.io/cluster-name: sample
|
||||
# Cluster Autoscaler labels
|
||||
autoscaling: enabled
|
||||
name: sample-md-0
|
||||
|
||||
# other fields omitted for brevity
|
||||
---
|
||||
# other Cluster API resources omitted for brevity
|
||||
```
|
||||
|
||||
|
||||
### Verify the Cluster Autoscaler
|
||||
|
||||
To verify that the Cluster Autoscaler is working as expected, you can deploy a workload with CPU requests in the Tenant cluster to simulate resource pressure.
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: hello-node
|
||||
name: hello-node
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hello-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hello-node
|
||||
spec:
|
||||
containers:
|
||||
- image: quay.io/google-containers/pause-amd64:3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: pause-amd64
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
```
|
||||
|
||||
Apply the workload to the Tenant cluster and simulate a load spike by increasing the replicas. The Cluster Autoscaler should scale up the cluster to accommodate the workload. Cooldown times must be configured properly on a per-cluster basis.
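A minimal sketch of how to trigger the spike, assuming the `hello-node` Deployment above and that the Tenant cluster kubeconfig has been extracted from the Management Cluster (the path and replica count below are illustrative):

```bash
# Scale the sample workload on the Tenant cluster to create unschedulable pods
kubectl --kubeconfig ~/.kube/$CLUSTER_NAME.kubeconfig scale deployment hello-node --replicas=30

# From the Management Cluster, watch the MachineDeployment grow as the autoscaler reacts
kubectl get machinedeployments -n default -w
```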
|
||||
|
||||
!!! warning "Possible Resource Wasting"
|
||||
With the Cluster Autoscaler, new machines are created automatically in a very short time, which can lead to over-provisioning and wasted resources. Read the official Cluster Autoscaler documentation carefully to set values appropriate to your infrastructure and provisioning times.
|
||||
104
docs/content/cluster-api/cluster-class.md
Normal file
@@ -0,0 +1,104 @@
|
||||
# Cluster Class
|
||||
|
||||
Kamaji supports **ClusterClass**, a simple way to create many clusters of a similar shape. This is useful for creating many clusters with the same configuration, such as a development cluster, a staging cluster, and a production cluster.
|
||||
|
||||
!!! warning "Experimental Feature"
|
||||
ClusterClass is an experimental feature of Cluster API. As with any experimental feature, it should be used with caution, as it may be unreliable. Experimental features are not subject to any compatibility or deprecation policy and are not yet recommended for production use.
|
||||
|
||||
You can read more about ClusterClass in the [Cluster API documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/).
|
||||
|
||||
## Enabling ClusterClass
|
||||
|
||||
To enable ClusterClass, you need to set the `CLUSTER_TOPOLOGY` environment variable to `true` before running `clusterctl init`. This enables the Cluster API feature gate for ClusterClass.
|
||||
|
||||
```bash
|
||||
export CLUSTER_TOPOLOGY=true
|
||||
clusterctl init --infrastructure vsphere --control-plane kamaji
|
||||
```
|
||||
|
||||
## Creating a ClusterClass
|
||||
|
||||
To create a ClusterClass, you need to create a `ClusterClass` custom resource. Here is an example of a `ClusterClass` that creates clusters running the control plane on the Kamaji Management Cluster and worker nodes on vSphere:
|
||||
|
||||
```yaml
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: ClusterClass
|
||||
metadata:
|
||||
name: kamaji-clusterclass
|
||||
spec:
|
||||
controlPlane:
|
||||
ref:
|
||||
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
|
||||
kind: KamajiControlPlaneTemplate
|
||||
name: kamaji-clusterclass-kamaji-control-plane-template
|
||||
infrastructure:
|
||||
ref:
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereClusterTemplate
|
||||
name: kamaji-clusterclass-vsphere-cluster-template
|
||||
workers:
|
||||
machineDeployments:
|
||||
- class: kamaji-clusterclass
|
||||
template:
|
||||
bootstrap:
|
||||
ref:
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: kamaji-clusterclass-kubeadm-config-template
|
||||
infrastructure:
|
||||
ref:
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: kamaji-clusterclass-vsphere-machine-template
|
||||
|
||||
# other resources omitted for brevity ...
|
||||
```
|
||||
|
||||
The template file [`capi-kamaji-vsphere-class-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-class-template.yaml) provides a full example of a ClusterClass for vSphere. You can generate a ClusterClass manifest using `clusterctl`.
|
||||
|
||||
First, list all the variables in the template file:
|
||||
|
||||
```bash
|
||||
cat capi-kamaji-vsphere-class-template.yaml | clusterctl generate yaml --list-variables
|
||||
```
|
||||
|
||||
Fill them with the desired values and generate the manifest:
|
||||
|
||||
```bash
|
||||
clusterctl generate yaml \
|
||||
--from capi-kamaji-vsphere-class-template.yaml \
|
||||
> capi-kamaji-vsphere-class.yaml
|
||||
```
|
||||
|
||||
Apply the generated manifest to create the ClusterClass:
|
||||
|
||||
```bash
|
||||
kubectl apply -f capi-kamaji-vsphere-class.yaml
|
||||
```
|
||||
|
||||
## Creating a Cluster from a ClusterClass
|
||||
|
||||
Once a ClusterClass is created, you can create a Cluster using the ClusterClass. Here is an example of a Cluster that uses the `kamaji-clusterclass`:
|
||||
|
||||
```yaml
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: sample
|
||||
spec:
|
||||
topology:
|
||||
class: kamaji-clusterclass
|
||||
classNamespace: capi-clusterclass
|
||||
version: v1.31.0
|
||||
controlPlane:
|
||||
replicas: 2
|
||||
workers:
|
||||
machineDeployments:
|
||||
- class: kamaji-clusterclass
|
||||
name: md-sample
|
||||
replicas: 3
|
||||
|
||||
# other resources omitted for brevity ...
|
||||
```
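Assuming the manifest above is saved as `capi-kamaji-cluster.yaml` (the file name is illustrative), apply it and then inspect the resulting topology with `clusterctl`:

```bash
kubectl apply -f capi-kamaji-cluster.yaml
clusterctl describe cluster sample
```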
|
||||
|
||||
Always refer to the [Cluster API documentation](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/) for the most up-to-date information on ClusterClass.
|
||||
98
docs/content/cluster-api/control-plane-provider.md
Normal file
@@ -0,0 +1,98 @@
|
||||
# Kamaji Control Plane Provider
|
||||
|
||||
Kamaji can act as a Cluster API Control Plane provider using the `KamajiControlPlane` custom resource, which defines the control plane of a Tenant Cluster.
|
||||
|
||||
Here is an example of a `KamajiControlPlane`:
|
||||
|
||||
```yaml
|
||||
kind: KamajiControlPlane
|
||||
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: '${CLUSTER_NAME}'
|
||||
namespace: '${CLUSTER_NAMESPACE}'
|
||||
spec:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
- --cloud-provider=external
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
- --cloud-provider=external
|
||||
dataStoreName: default
|
||||
addons:
|
||||
coreDNS: {}
|
||||
kubeProxy: {}
|
||||
konnectivity: {}
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
preferredAddressTypes:
|
||||
- InternalIP
|
||||
network:
|
||||
serviceType: LoadBalancer
|
||||
version: ${KUBERNETES_VERSION}
|
||||
```
|
||||
|
||||
You can reference it as the control plane provider in a standard `Cluster` custom resource:
|
||||
|
||||
```yaml
|
||||
kind: Cluster
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}'
|
||||
name: '${CLUSTER_NAME}'
|
||||
namespace: '${CLUSTER_NAMESPACE}'
|
||||
spec:
|
||||
controlPlaneRef:
|
||||
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
|
||||
kind: KamajiControlPlane
|
||||
name: '${CLUSTER_NAME}'
|
||||
clusterNetwork:
|
||||
pods:
|
||||
cidrBlocks:
|
||||
- '${PODS_CIDR}'
|
||||
services:
|
||||
cidrBlocks:
|
||||
- '${SERVICES_CIDR}'
|
||||
infrastructureRef:
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: ... # your infrastructure kind may vary
|
||||
name: '${CLUSTER_NAME}'
|
||||
```
|
||||
|
||||
!!! info "Full Reference"
|
||||
For a full reference of the `KamajiControlPlane` custom resource, please see the [Reference APIs](https://doc.crds.dev/github.com/clastix/cluster-api-control-plane-provider-kamaji/controlplane.cluster.x-k8s.io/KamajiControlPlane/v1alpha1).
|
||||
|
||||
## Getting started with the Kamaji Control Plane Provider
|
||||
|
||||
Cluster API Provider Kamaji is compliant with the `clusterctl` contract, which means you can use it with the `clusterctl` CLI to create and manage your Kamaji-based clusters.
|
||||
|
||||
!!! info "Options for install Cluster API"
|
||||
There are two ways to get started with Cluster API:
|
||||
|
||||
* using `clusterctl` to install the Cluster API components.
|
||||
* using the Cluster API Operator. Please refer to the [Cluster API Operator](https://cluster-api-operator.sigs.k8s.io/) guide for this option.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
* [`clusterctl`](https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl) installed in your workstation to handle the lifecycle of your clusters.
|
||||
* [`kubectl`](https://kubernetes.io/docs/tasks/tools/) installed in your workstation to interact with your clusters.
|
||||
* [Kamaji](../getting-started/index.md) installed in your Management Cluster.
|
||||
|
||||
### Initialize the Management Cluster
|
||||
|
||||
Use `clusterctl` to initialize the Management Cluster. When executed for the first time, `clusterctl init` will fetch and install the Cluster API components in the Management Cluster:
|
||||
|
||||
```bash
|
||||
clusterctl init --control-plane kamaji
|
||||
```
|
||||
|
||||
As a result, the following Cluster API components will be installed:
|
||||
|
||||
* Cluster API Provider in `capi-system` namespace
|
||||
* Bootstrap Provider in `capi-kubeadm-bootstrap-system` namespace
|
||||
* Kamaji Control Plane Provider in `kamaji-system` namespace
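To quickly confirm the providers are running, check the pods in the namespaces listed above:

```bash
kubectl get pods -n capi-system
kubectl get pods -n capi-kubeadm-bootstrap-system
kubectl get pods -n kamaji-system
```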
|
||||
|
||||
In the next step, we will create a fully functional Kubernetes cluster using the Kamaji Control Plane Provider and the Infrastructure Provider of your choice.
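For example, assuming vSphere as the Infrastructure Provider (pick the provider matching your environment), both providers can be installed in a single initialization step:

```bash
clusterctl init --infrastructure vsphere --control-plane kamaji
```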
|
||||
|
||||
For a complete list of supported infrastructure providers, please refer to the [other providers](other-providers.md) page.
|
||||
|
||||
14
docs/content/cluster-api/index.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Cluster APIs Support
|
||||
|
||||
The [Cluster API](https://github.com/kubernetes-sigs/cluster-api) brings declarative, Kubernetes-style APIs to the creation, configuration, and management of Kubernetes clusters. If you're not familiar with the Cluster API project, you can learn more from the [official documentation](https://cluster-api.sigs.k8s.io/).
|
||||
|
||||
Users can utilize Kamaji in two distinct ways:
|
||||
|
||||
* **Standalone:** Kamaji can be used as a standalone Kubernetes Operator installed in the Management Cluster to manage multiple Tenant Control Planes. Worker nodes of Tenant Clusters can join any infrastructure, whether cloud, data center, or edge, using automation tools such as _Ansible_ and _Terraform_, or even manually with any script calling `kubeadm`. See [yaki](https://goyaki.clastix.io/) as an example.
|
||||
|
||||
* **Cluster API Provider:** Kamaji can be used as a [Cluster API Control Plane Provider](https://cluster-api.sigs.k8s.io/reference/providers#control-plane) to manage multiple Tenant Control Planes across various infrastructures. Kamaji offers seamless integration with the most popular [Cluster API Infrastructure Providers](https://cluster-api.sigs.k8s.io/reference/providers#infrastructure).
|
||||
|
||||
!!! tip "Control Plane and Infrastructure Decoupling"
|
||||
Kamaji decouples the Control Plane from the infrastructure, allowing the Kamaji Management Cluster to reside on a different infrastructure or cloud provider than the Tenant worker machines, as long as network reachability is ensured. This flexibility enables mixing and matching infrastructure providers, such as hosting the Management Cluster on a public cloud while deploying Tenant worker machines on private data centers, edge environments, or other clouds.
|
||||
|
||||
Check the currently supported infrastructure providers and the roadmap on the related [repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
21
docs/content/cluster-api/other-providers.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Other Infra Providers
|
||||
|
||||
Kamaji offers seamless integration with the most popular [Cluster API Infrastructure Providers](https://cluster-api.sigs.k8s.io/reference/providers#infrastructure):
|
||||
|
||||
- AWS
|
||||
- Azure
|
||||
- Google Cloud
|
||||
- Equinix/Packet
|
||||
- Hetzner
|
||||
- KubeVirt
|
||||
- Metal³
|
||||
- Nutanix
|
||||
- OpenStack
|
||||
- Tinkerbell
|
||||
- vSphere
|
||||
- IONOS Cloud
|
||||
- Proxmox by IONOS Cloud
|
||||
|
||||
For the most up-to-date information and technical considerations, please always check the related [repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
|
||||
192
docs/content/cluster-api/proxmox-infra-provider.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# Proxmox VE Infra Provider
|
||||
|
||||
Use the Cluster API [Proxmox VE Infra Provider](https://github.com/ionos-cloud/cluster-api-provider-proxmox) to create a fully functional Kubernetes cluster with the Cluster API [Kamaji Control Plane Provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
The Proxmox Cluster API implementation is developed and maintained by [IONOS Cloud](https://github.com/ionos-cloud).
|
||||
|
||||
## Proxmox VE Requirements
|
||||
|
||||
A Template VM built using the [Proxmox Builder](https://image-builder.sigs.k8s.io/capi/providers/proxmox) is necessary to create the cluster machines.
|
||||
|
||||
## Install the Proxmox VE Infrastructure Provider
|
||||
|
||||
To use the Proxmox Cluster API provider, you must connect and authenticate to a Proxmox VE system.
|
||||
|
||||
```bash
|
||||
# The Proxmox VE host
|
||||
export PROXMOX_URL="https://pve.example:8006"
|
||||
|
||||
# The Proxmox VE TokenID for authentication
|
||||
export PROXMOX_TOKEN="clastix@pam!capi"
|
||||
|
||||
# The secret associated with the TokenID
|
||||
export PROXMOX_SECRET="REDACTED"
|
||||
```
|
||||
|
||||
Install the Infrastructure Provider:
|
||||
|
||||
```bash
|
||||
clusterctl init --infrastructure proxmox
|
||||
```
|
||||
|
||||
## Install the IPAM Provider
|
||||
|
||||
To assign IP addresses to nodes, you can use the in-cluster [IPAM provider](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster). To do so, initialize the Management Cluster with the `--ipam in-cluster` flag:
|
||||
|
||||
```bash
|
||||
clusterctl init --ipam in-cluster
|
||||
```
|
||||
|
||||
## Create a Tenant Cluster
|
||||
|
||||
Once all controllers are running in the management cluster, you can generate and apply the cluster manifests for the tenant cluster you want to provision.
|
||||
|
||||
### Generate the Cluster Manifest using the template
|
||||
|
||||
Use `clusterctl` to generate a tenant cluster manifest for your Proxmox VE environment. Set the following environment variables to match the workload cluster configuration:
|
||||
|
||||
```bash
|
||||
# Cluster Configuration
|
||||
export CLUSTER_NAME="sample"
|
||||
export CLUSTER_NAMESPACE="default"
|
||||
export CONTROL_PLANE_REPLICAS=2
|
||||
export KUBERNETES_VERSION="v1.31.4"
|
||||
export CLUSTER_DATASTORE="default"
|
||||
```
|
||||
|
||||
Set the following environment variables to configure the workload cluster network:
|
||||
|
||||
```bash
|
||||
# Networking Configuration
|
||||
export IP_RANGE='["192.168.100.100-192.168.100.200"]'
|
||||
export IP_PREFIX=24
|
||||
export GATEWAY="192.168.100.1"
|
||||
export DNS_SERVERS='["8.8.8.8"]'
|
||||
export NETWORK_BRIDGE="vmbr0"
|
||||
export NETWORK_MODEL="virtio"
|
||||
```
|
||||
|
||||
Set the following environment variables to configure the workload machines:
|
||||
|
||||
```bash
|
||||
# Node Configuration
|
||||
export SSH_USER="clastix"
|
||||
export SSH_AUTHORIZED_KEY="ssh-rsa AAAAB3Nz ..."
|
||||
export NODE_REPLICAS=2
|
||||
|
||||
# Resource Configuration
|
||||
export SOURCE_NODE="labs"
|
||||
export TEMPLATE_ID=100
|
||||
export ALLOWED_NODES='["labs"]'
|
||||
export MEMORY_MIB=4096
|
||||
export NUM_CORES=2
|
||||
export NUM_SOCKETS=2
|
||||
export BOOT_VOLUME_DEVICE="scsi0"
|
||||
export BOOT_VOLUME_SIZE=20
|
||||
export FILE_STORAGE_FORMAT="qcow2"
|
||||
export STORAGE_NODE="local"
|
||||
```
|
||||
|
||||
Use the following command to generate a cluster manifest based on the [`capi-kamaji-proxmox-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/proxmox/capi-kamaji-proxmox-template.yaml) template file:
|
||||
|
||||
```bash
|
||||
clusterctl generate cluster $CLUSTER_NAME \
|
||||
--from capi-kamaji-proxmox-template.yaml \
|
||||
> capi-kamaji-proxmox-cluster.yaml
|
||||
```
|
||||
|
||||
### Additional cloud-init configuration
|
||||
|
||||
Cluster API requires machine templates based on `cloud-init`. You can add additional `cloud-init` configuration to further customize the worker nodes by including an additional `cloud-init` file in the `KubeadmConfigTemplate`:
|
||||
|
||||
```yaml
|
||||
kind: KubeadmConfigTemplate
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
metadata:
|
||||
name: ${CLUSTER_NAME}-md-0
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
files:
|
||||
- path: "/etc/cloud/cloud.cfg.d/99-custom.cfg"
|
||||
content: "${CLOUD_INIT_CONFIG:-}"
|
||||
owner: "root:root"
|
||||
permissions: "0644"
|
||||
```
|
||||
|
||||
You can then set the `CLOUD_INIT_CONFIG` environment variable to include the additional configuration:
|
||||
|
||||
```bash
|
||||
export CLOUD_INIT_CONFIG="#cloud-config
package_update: true
packages:
- net-tools"
|
||||
```
|
||||
|
||||
and include it in the `clusterctl generate cluster` command:
|
||||
|
||||
```bash
|
||||
clusterctl generate cluster $CLUSTER_NAME \
|
||||
--from capi-kamaji-proxmox-template.yaml \
|
||||
> capi-kamaji-proxmox-cluster.yaml
|
||||
```
|
||||
|
||||
### Apply the Cluster Manifest
|
||||
|
||||
Apply the generated cluster manifest to provision the tenant cluster:
|
||||
|
||||
```bash
|
||||
kubectl apply -f capi-kamaji-proxmox-cluster.yaml
|
||||
```
|
||||
|
||||
Check the status of the cluster deployment using `clusterctl`:
|
||||
|
||||
```bash
|
||||
clusterctl describe cluster $CLUSTER_NAME
|
||||
```
|
||||
|
||||
and check the related Tenant Control Plane created in the Kamaji Management Cluster:
|
||||
|
||||
```bash
|
||||
kubectl get tcp -n default
|
||||
```
|
||||
|
||||
## Access the Tenant Cluster
|
||||
|
||||
To access the tenant cluster, you can extract the `kubeconfig` file from the Kamaji Management Cluster:
|
||||
|
||||
```bash
|
||||
clusterctl get kubeconfig $CLUSTER_NAME \
|
||||
> ~/.kube/$CLUSTER_NAME.kubeconfig
|
||||
```
|
||||
|
||||
and use it to access the tenant cluster:
|
||||
|
||||
```bash
|
||||
export KUBECONFIG=~/.kube/$CLUSTER_NAME.kubeconfig
|
||||
kubectl cluster-info
|
||||
```
|
||||
|
||||
## Delete the Tenant Cluster
|
||||
|
||||
For cluster deletion, use the following command:
|
||||
|
||||
```bash
|
||||
kubectl delete cluster $CLUSTER_NAME
|
||||
```
|
||||
|
||||
Always use `kubectl delete cluster $CLUSTER_NAME` to delete the tenant cluster. Using `kubectl delete -f capi-kamaji-proxmox-cluster.yaml` may lead to orphaned resources in some scenarios, as this method doesn't always respect ownership references between resources that were created after the initial deployment.
|
||||
|
||||
## Install the Tenant Cluster as Helm Release
|
||||
|
||||
Alternatively, you can create a Tenant Cluster using the Helm Chart [cluster-api-kamaji-proxmox](https://github.com/clastix/cluster-api-kamaji-proxmox).
|
||||
|
||||
Create a Tenant Cluster as Helm Release:
|
||||
|
||||
```bash
|
||||
helm repo add clastix https://clastix.github.io/cluster-api-kamaji-proxmox
|
||||
helm repo update
|
||||
helm install sample clastix/cluster-api-kamaji-proxmox \
|
||||
--set cluster.name=sample \
|
||||
--namespace default \
|
||||
--values my-values.yaml
|
||||
```
|
||||
|
||||
where `my-values.yaml` is a file containing the configuration values for the Tenant Cluster.
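If you are unsure which keys the chart expects, a safe starting point is to dump the chart's default values and edit them:

```bash
# Inspect the chart defaults as a starting point for my-values.yaml
helm show values clastix/cluster-api-kamaji-proxmox > my-values.yaml
```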
|
||||
284
docs/content/cluster-api/vsphere-infra-provider.md
Normal file
@@ -0,0 +1,284 @@
|
||||
# vSphere Infra Provider
|
||||
|
||||
Use the Cluster API [vSphere Infra Provider](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere) to create a fully functional Kubernetes cluster using the Cluster API [Kamaji Control Plane Provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
## vSphere Requirements
|
||||
|
||||
You need to access a **vSphere** environment with the following requirements:
|
||||
|
||||
- The vSphere environment should be configured with a DHCP service in the primary VM network for your tenant clusters. Alternatively, you can use an [IPAM Provider](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster).
|
||||
|
||||
- Configure one Resource Pool across the hosts onto which the tenant clusters will be provisioned. Every host in the Resource Pool will need access to shared storage.
|
||||
|
||||
- A Template VM based on published [OVA images](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere). For production-like environments, it is highly recommended to build and use your own custom OVA images. Take a look at the [image-builder](https://github.com/kubernetes-sigs/image-builder) project.
|
||||
|
||||
- To use the vSphere Container Storage Interface (CSI), your vSphere cluster needs support for Cloud Native Storage (CNS). CNS relies on a shared datastore. Ensure that your vSphere environment is properly configured to support CNS.
|
||||
|
||||
## Install the vSphere Infrastructure Provider
|
||||
|
||||
In order to use the vSphere Cluster API provider, you must be able to connect and authenticate to a **vCenter**. Ensure you have credentials for your vCenter server:
|
||||
|
||||
```bash
|
||||
export VSPHERE_USERNAME="admin@vsphere.local"
|
||||
export VSPHERE_PASSWORD="*******"
|
||||
```
|
||||
|
||||
Install the vSphere Infrastructure Provider:
|
||||
|
||||
```bash
|
||||
clusterctl init --infrastructure vsphere
|
||||
```
|
||||
|
||||
## Install the IPAM Provider
|
||||
|
||||
If you intend to use IPAM to assign addresses to the nodes, you can use the in-cluster [IPAM provider](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster) instead of relying on a DHCP service. To do so, initialize the Management Cluster with the `--ipam in-cluster` flag:
|
||||
|
||||
```bash
|
||||
clusterctl init --ipam in-cluster
|
||||
```

## Create a Tenant Cluster

Once all the controllers are up and running in the management cluster, you can generate and apply the cluster manifest of the tenant cluster you want to provision.

### Generate the Cluster Manifest using the template

Using `clusterctl`, you can generate a tenant cluster manifest for your vSphere environment. Set the environment variables to match your vSphere configuration.

For example:

```bash
# vSphere Configuration
export VSPHERE_SERVER="vcenter.vsphere.local"
export VSPHERE_DATACENTER="SDDC-Datacenter"
export VSPHERE_DATASTORE="DefaultDatastore"
export VSPHERE_NETWORK="VM Network"
export VSPHERE_RESOURCE_POOL="*/Resources"
export VSPHERE_FOLDER="kamaji-capi-pool"
export VSPHERE_TLS_THUMBPRINT="..."
export VSPHERE_STORAGE_POLICY="vSAN Storage Policy"
```

If you intend to use IPAM, set the environment variables to match your IPAM configuration.

For example:

```bash
# IPAM Configuration
export NODE_IPAM_POOL_RANGE="10.9.62.100-10.9.62.200"
export NODE_IPAM_POOL_PREFIX="24"
export NODE_IPAM_POOL_GATEWAY="10.9.62.1"
```

Set the environment variables to match your cluster configuration.

For example:

```bash
# Cluster Configuration
export CLUSTER_NAME="sample"
export CLUSTER_NAMESPACE="default"
export POD_CIDR="10.36.0.0/16"
export SVC_CIDR="10.96.0.0/16"
export CONTROL_PLANE_REPLICAS=2
export NAMESERVER="8.8.8.8"
export KUBERNETES_VERSION="v1.31.0"
export CPI_IMAGE_VERSION="v1.31.0"
```

Set the environment variables to match your machine configuration.

For example:

```bash
# Machine Configuration
export MACHINE_TEMPLATE="ubuntu-2404-kube-v1.31.0"
export MACHINE_DEPLOY_REPLICAS=2
export NODE_DISK_SIZE=25
export NODE_MEMORY_SIZE=8192
export NODE_CPU_COUNT=2
export SSH_USER="clastix"
export SSH_AUTHORIZED_KEY="ssh-rsa AAAAB3N..."
```

The following command will generate a cluster manifest based on the [`capi-kamaji-vsphere-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-template.yaml) template file:

```bash
clusterctl generate cluster $CLUSTER_NAME \
  --from capi-kamaji-vsphere-template.yaml \
  > capi-kamaji-vsphere-cluster.yaml
```

If you want to use DHCP instead of IPAM, use the [`capi-kamaji-vsphere-dhcp-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-dhcp-template.yaml) template file:

```bash
clusterctl generate cluster $CLUSTER_NAME \
  --from capi-kamaji-vsphere-dhcp-template.yaml \
  > capi-kamaji-vsphere-cluster.yaml
```

### Additional cloud-init configuration

Cluster API provisions machines from templates that rely on `cloud-init`. You can add extra `cloud-init` configuration to further customize the worker nodes by including an additional `cloud-init` file in the `KubeadmConfigTemplate`:

```yaml
kind: KubeadmConfigTemplate
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      files:
        - path: "/etc/cloud/cloud.cfg.d/99-custom.cfg"
          content: "${CLOUD_INIT_CONFIG:-}"
          owner: "root:root"
          permissions: "0644"
```

You can then set the `CLOUD_INIT_CONFIG` environment variable to include the additional configuration:

```bash
export CLOUD_INIT_CONFIG="#cloud-config package_update: true packages: - net-tools"
```
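
Keep in mind that `cloud-config` is newline-sensitive, while the variable above is substituted into a double-quoted YAML scalar (`content: "${CLOUD_INIT_CONFIG:-}"`). Since double-quoted YAML scalars interpret `\n` escape sequences, one hedged way to preserve line breaks is to embed them literally; validate the rendered manifest before applying:

```bash
# Single quotes keep the \n sequences literal in the shell;
# they become real newlines once the manifest is parsed as YAML.
export CLOUD_INIT_CONFIG='#cloud-config\npackage_update: true\npackages:\n  - net-tools'
```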

and include it in the `clusterctl generate cluster` command:

```bash
clusterctl generate cluster $CLUSTER_NAME \
  --from capi-kamaji-vsphere-template.yaml \
  > capi-kamaji-vsphere-cluster.yaml
```

### Apply the Cluster Manifest

Apply the generated cluster manifest to create the tenant cluster:

```bash
kubectl apply -f capi-kamaji-vsphere-cluster.yaml
```

You can check the status of the cluster deployment with `clusterctl`:

```bash
clusterctl describe cluster $CLUSTER_NAME
```

You can check the status of the tenant cluster with `kubectl`:

```bash
kubectl get clusters -n default
```

and the related tenant control plane created on the Kamaji Management Cluster:

```bash
kubectl get tcp -n default
```
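
Since the tenant cluster is built from standard Cluster API resources, you can also follow the worker machines as the infrastructure provider creates them:

```bash
# List the Cluster API machines and their provisioning phase
kubectl get machines -n default

# Watch the machine deployment scale up to the requested replicas
kubectl get machinedeployments -n default -w
```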

## Access the Tenant Cluster

To access the tenant cluster, you can extract the `kubeconfig` file from the Kamaji Management Cluster:

```bash
clusterctl get kubeconfig $CLUSTER_NAME \
  > ~/.kube/$CLUSTER_NAME.kubeconfig
```

and use it to access the tenant cluster:

```bash
export KUBECONFIG=~/.kube/$CLUSTER_NAME.kubeconfig
kubectl cluster-info
```

## Cloud Controller Manager

The template file [`capi-kamaji-vsphere-template.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-template.yaml) includes the external [Cloud Controller Manager (CCM)](https://github.com/kubernetes/cloud-provider-vsphere) configuration for vSphere. The CCM is a Kubernetes controller that manages the cloud provider's resources.

Usually, the CCM is deployed on control plane nodes, but in Kamaji there are no control plane nodes, so the CCM is deployed on the worker nodes as a DaemonSet.

As an alternative, you can deploy the CCM as part of the Hosted Control Plane on the Management Cluster. To do so, the template file [`capi-kamaji-vsphere-template-ccm.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-template-ccm.yaml) includes the configuration for the CCM as part of the Kamaji Control Plane. This approach provides security benefits by isolating vSphere credentials from tenant users while maintaining full Cluster API integration.

The following command will generate a cluster manifest with the CCM installed on the Management Cluster:

```bash
clusterctl generate cluster $CLUSTER_NAME \
  --from capi-kamaji-vsphere-template-ccm.yaml \
  > capi-kamaji-vsphere-cluster.yaml
```

Apply the generated cluster manifest to create the tenant cluster:

```bash
kubectl apply -f capi-kamaji-vsphere-cluster.yaml
```

## vSphere CSI Driver

The template file [`capi-kamaji-vsphere-template-csi.yaml`](https://raw.githubusercontent.com/clastix/cluster-api-control-plane-provider-kamaji/master/templates/vsphere/capi-kamaji-vsphere-template-csi.yaml) includes the [vSphere CSI Driver](https://github.com/kubernetes-sigs/vsphere-csi-driver) configuration for vSphere. The vSphere CSI Driver is a Container Storage Interface (CSI) driver that provides a way to use vSphere storage with Kubernetes.

This template file introduces a *"split configuration"* for the vSphere CSI Driver, with the CSI driver deployed on the worker nodes as a DaemonSet and the CSI Controller Manager deployed on the Management Cluster as part of the Hosted Control Plane. In this way, no vSphere credentials are required on the tenant cluster.

This split architecture enables:

* Tenant isolation from vSphere credentials
* Simplified networking requirements
* Centralized controller management

The template file also includes a default storage class for the vSphere CSI Driver.

Set the environment variables to match your storage configuration.

For example:

```bash
# Storage Configuration
export CSI_INSECURE="false"
export CSI_LOG_LEVEL="PRODUCTION" # or "DEVELOPMENT"
export CSI_STORAGE_CLASS_NAME="vsphere-csi"
```

The following command will generate a cluster manifest with the split configuration for the vSphere CSI Driver:

```bash
clusterctl generate cluster $CLUSTER_NAME \
  --from capi-kamaji-vsphere-template-csi.yaml \
  > capi-kamaji-vsphere-cluster.yaml
```

Apply the generated cluster manifest to create the tenant cluster:

```bash
kubectl apply -f capi-kamaji-vsphere-cluster.yaml
```

## Delete the Tenant Cluster

For cluster deletion, use the following command:

```bash
kubectl delete cluster sample
```

Always use `kubectl delete cluster $CLUSTER_NAME` to delete the tenant cluster. Using `kubectl delete -f capi-kamaji-vsphere-cluster.yaml` may lead to orphaned resources in some scenarios, as this method doesn't always respect ownership references between resources that were created after the initial deployment.

## Install the Tenant Cluster as Helm Release

Another option to create a Tenant Cluster is to use the Helm Chart [cluster-api-kamaji-vsphere](https://github.com/clastix/cluster-api-kamaji-vsphere).

!!! warning "Advanced Usage"
    This Helm Chart provides several additional configuration options to customize the Tenant Cluster. Please refer to its documentation for more information. Make sure you get comfortable with the Cluster API concepts and Kamaji before attempting to use it.

Create a Tenant Cluster as a Helm Release:

```bash
helm repo add clastix https://clastix.github.io/cluster-api-kamaji-vsphere
helm repo update
helm install sample clastix/cluster-api-kamaji-vsphere \
  --set cluster.name=sample \
  --namespace default \
  --values my-values.yaml
```

where `my-values.yaml` is a file containing the configuration values for the Tenant Cluster.
@@ -1,54 +0,0 @@

# Concepts

**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_.

These are the design requirements behind Kamaji:

- Communication between the _“Management Cluster”_ and a _“Tenant Cluster”_ is unidirectional. The _“Management Cluster”_ manages a _“Tenant Cluster”_, but a _“Tenant Cluster”_ has no awareness of the _“Management Cluster”_.
- Communication between different _“Tenant Clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.

Goals and scope may vary as the project evolves.

## Tenant Control Plane

Kamaji is special because the Control Planes of the _“Tenant Clusters”_ are regular pods running in a namespace of the _“Management Cluster”_ instead of dedicated machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they would run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of the upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.

High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“Management Cluster”_. The `LoadBalancer` service type is used by default; `NodePort` and `ClusterIP` are other viable options, depending on the case.

Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` for short.

All the _“Tenant Clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).

## Tenant worker nodes

And what about the tenant worker nodes?
They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane.
Kamaji's goal is to manage the lifecycle of hundreds of these _“Tenant Clusters”_, not just one. So how do you add another Tenant Cluster to Kamaji?
As you might expect, you just deploy a new Tenant Control Plane in one of the _“Management Cluster”_ namespaces, and then join the tenant worker nodes to it.

A [Cluster API ControlPlane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been released, offering a Cluster API-native declarative lifecycle and automating the worker nodes' join.

## Datastores

Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each Tenant Cluster saves its state, to be able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the Management Cluster, you can deploy one or more multi-tenant `etcd` clusters to save the state of multiple Tenant Clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach to managing multiple datastores. By sharing a datastore between multiple tenants, resiliency is still guaranteed and the pods' count remains under control, so it meets the main goals of resiliency and cost optimization. The trade-off here is that you have to operate external datastores, in addition to the `etcd` of the _“Management Cluster”_, and manage access to make sure that each _“Tenant Cluster”_ uses only its own data.

### Other storage drivers

Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“Tenant Clusters”_.

### Pooling

By default, Kamaji expects to persist all the _“Tenant Clusters”_ data in a single datastore that can be backed by different drivers. However, you can pick a different datastore for a specific set of _“Tenant Clusters”_ that have different resources assigned or a different tiering. Pooling of multiple datastores is an option you can leverage for a very large set of _“Tenant Clusters”_, so you can distribute the load properly. As a future improvement, a _datastore scheduler_ feature is on the roadmap so that Kamaji itself can automatically assign a _“Tenant Cluster”_ to the best datastore in the pool.

### Migration

In order to simplify Day 2 operations and reduce the operational burden, Kamaji provides the capability to live migrate data from one datastore to another of the same driver, without manual and error-prone backup and restore operations.

> Currently, live data migration is only available between datastores having the same driver.

## Konnectivity

In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.

This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes.

After worker nodes join the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker nodes traffic goes through these connections.

> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
36
docs/content/concepts/datastore.md
Normal file
@@ -0,0 +1,36 @@

# Datastore

A critical part of any Kubernetes control plane is its datastore, the system that persists the cluster’s state, configuration, and operational data. In Kamaji, this requirement is addressed with flexibility and scalability in mind, allowing you to choose the best storage backend for your needs and to manage many clusters efficiently.

Kamaji’s architecture decouples the control plane from its underlying datastore. Instead of each Tenant Cluster running its own dedicated datastore instance, Kamaji enables you to share datastores across multiple Tenant Clusters, or assign a dedicated datastore to each Tenant Cluster where needed. This approach optimizes resource usage, simplifies operations, and supports a variety of backend technologies.

## Supported Datastore Backends

Kamaji supports several options for persisting Tenant Cluster state:

- **etcd:**
  The default and most widely used Kubernetes datastore. You can deploy one or more etcd clusters in the Management Cluster and assign them to Tenant Control Planes as needed.

- **SQL Databases:**
  For environments where etcd is not ideal, Kamaji integrates with [kine](https://github.com/k3s-io/kine), allowing you to use MySQL or PostgreSQL-compatible databases as the backend for Tenant Clusters.

!!! info "NATS"
    Support for [NATS](https://nats.io/) is still experimental, mostly because multi-tenancy is not (yet) supported in NATS.

## Declarative Management

Datastores are managed declaratively using the `DataStore` Custom Resource Definition (CRD). This makes it easy to define, configure, and assign datastores to Tenant Control Planes, and fits naturally into GitOps and Infrastructure as Code workflows.
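
As a hedged sketch of such a resource (the `DataStore` kind is part of the `kamaji.clastix.io/v1alpha1` API; the fields below are illustrative and omit the authentication and TLS settings a real datastore needs, so check the CRD reference for the exact schema):

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: postgres-pool-01
spec:
  driver: PostgreSQL        # etcd, MySQL, PostgreSQL, or NATS
  endpoints:
    - postgres.kamaji-system.svc:5432
```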

## Pooling and Scalability

By default, Kamaji can persist all Tenant Clusters’ data in a single datastore, but you can also create pools of datastores and assign clusters based on resource requirements, performance needs, or organizational policies. This pooling capability is especially useful for large-scale environments, where distributing the load across multiple datastores ensures resilience and scalability.

Kamaji’s roadmap includes a datastore scheduler, which will automatically assign new Tenant Clusters to the most appropriate datastore in the pool, further reducing operational overhead.

## Live Migration

Operational needs change over time, and Kamaji makes it easy to adapt. You can live-migrate a Tenant Cluster’s data from one datastore to another, as long as they use the same backend driver, without manual backup and restore steps. This feature simplifies Day 2 operations and helps you optimize your infrastructure as your requirements evolve.

!!! info "Datastore Migration"
    Currently, live data migration is only available between datastores having the same driver.
57
docs/content/concepts/index.md
Normal file
@@ -0,0 +1,57 @@

# High Level Overview

Kamaji is an open source Kubernetes Operator that transforms any Kubernetes cluster into a **Management Cluster** capable of orchestrating and managing multiple independent **Tenant Clusters**. This architecture is designed to simplify large-scale Kubernetes operations, reduce infrastructure costs, and provide strong isolation between tenants.



## Architecture Overview

- **Management Cluster:**
  The central cluster where Kamaji is installed. It hosts the control planes for all Tenant Clusters as regular Kubernetes pods, leveraging the Management Cluster’s reliability, scalability, and operational features.

- **Tenant Clusters:**
  These are user-facing Kubernetes clusters, each with its own dedicated control plane running as pods in the Management Cluster. Tenant Clusters are fully isolated from each other and unaware of the Management Cluster’s existence.

- **Tenant Worker Nodes:**
  Regular virtual or bare metal machines that join a Tenant Cluster by connecting to its control plane. These nodes run only tenant workloads, ensuring strong security and resource isolation.

## Design Principles

- **Unidirectional Management:**
  The Management Cluster manages all Tenant Clusters. Communication is strictly one-way: Tenant Clusters do not have access to or awareness of the Management Cluster.

- **Strong Isolation:**
  There is no communication between different Tenant Clusters. Each cluster is fully isolated at the control plane and data store level.

- **Declarative Operations:**
  Kamaji leverages Kubernetes Custom Resource Definitions (CRDs) to provide a fully declarative approach to managing control planes, datastores, and other resources.

- **CNCF Compliance:**
  Kamaji uses upstream, unmodified Kubernetes components and kubeadm for control plane setup, ensuring that all Tenant Clusters are [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and compatible with standard Kubernetes tooling.

## Extensibility and Integrations

Kamaji is designed to integrate seamlessly with the broader cloud-native and enterprise ecosystem, enabling organizations to leverage their existing tools and infrastructure:

- **Infrastructure as Code:**
  Kamaji works well with tools like [Terraform](https://www.terraform.io/) and [Ansible](https://www.ansible.com/) for automated cluster provisioning and management.

- **GitOps:**
  Kamaji supports GitOps workflows, enabling you to manage cluster and tenant lifecycle declaratively through version-controlled repositories using tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argo-cd.readthedocs.io/). This ensures consistency, auditability, and repeatability in your operations.

- **Cluster API Integration:**
  Kamaji can be used as a [Cluster API Control Plane Provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji), enabling automated, declarative lifecycle management of clusters and worker nodes across any infrastructure.

- **Enterprise Addons:**
  Additional features, such as Ingress management for Tenant Control Planes, are available as enterprise-grade addons.

## Learn More

Explore the following concepts to understand how Kamaji works under the hood:

- [Tenant Control Plane](tenant-control-plane.md)
- [Datastore](datastore.md)
- [Tenant Worker Nodes](tenant-worker-nodes.md)
- [Konnectivity](konnectivity.md)

Kamaji’s architecture is designed for flexibility, scalability, and operational simplicity, making it an ideal solution for organizations managing multiple Kubernetes clusters at scale.
37
docs/content/concepts/konnectivity.md
Normal file
@@ -0,0 +1,37 @@

# Konnectivity

In traditional Kubernetes deployments, the control plane components need to communicate directly with worker nodes for various operations like executing commands in pods, retrieving logs, or managing port forwards. However, in many real-world environments, especially those spanning multiple networks or cloud providers, direct communication isn't always possible or desirable. This is where Konnectivity comes in.

## Understanding Konnectivity in Kamaji

Kamaji integrates [Konnectivity](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) as a core component of its architecture. Each Tenant Control Plane pod includes a konnectivity-server running as a sidecar container, which establishes and maintains secure tunnels with agents running on the worker nodes. This design ensures reliable communication even in complex network environments.

The Konnectivity service consists of two main components:

1. **Konnectivity Server:**
   Runs alongside the control plane components in each Tenant Control Plane pod and is exposed on port 8132. It manages connections from worker nodes and routes traffic appropriately.

2. **Konnectivity Agent:**
   Runs on each worker node and initiates outbound connections to its control plane's Konnectivity server. These connections are maintained to create a reliable tunnel for all control plane to worker node communication.

## How It Works

When a worker node joins a Tenant Cluster, the Konnectivity agents automatically establish connections to their designated Konnectivity server. These connections are maintained continuously, ensuring reliable communication paths between the control plane and worker nodes.

All traffic from the control plane to worker nodes flows through these established tunnels, enabling operations such as:

- Executing commands in pods
- Retrieving container logs
- Managing port forwards
- Collecting metrics and health information
- Running exec sessions for debugging

## Configuration and Management

Konnectivity is enabled by default in Kamaji, as it's considered a best practice for modern Kubernetes deployments. However, it can be disabled if your environment has different requirements or if you need to use alternative networking solutions.
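
Concretely, the toggle lives in the `TenantControlPlane` manifest: the addon is on when the `konnectivity` stanza is present under `spec.addons`, and off when it is omitted. A minimal sketch, mirroring the fields used in the getting-started guides:

```yaml
# Fragment of a TenantControlPlane spec: Konnectivity enabled on port 8132
spec:
  addons:
    coreDNS: {}
    kubeProxy: {}
    konnectivity:          # remove this stanza to disable Konnectivity
      server:
        port: 8132
```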

The service is automatically configured when worker nodes join a cluster, without requiring any operational overhead. The connection details are managed as part of the standard node bootstrap process, making it transparent to cluster operators and users.

---

By integrating Konnectivity as a core feature, Kamaji ensures that your Tenant Clusters can operate reliably and securely across any network topology, making it easier to build and manage distributed Kubernetes environments at scale.
29
docs/content/concepts/tenant-control-plane.md
Normal file
@@ -0,0 +1,29 @@

# Tenant Control Plane

Kamaji introduces a new way to manage Kubernetes control planes at scale. Instead of dedicating separate machines to each cluster’s control plane, Kamaji runs every Tenant Cluster’s control plane as a set of pods inside the Management Cluster. This design unlocks significant efficiencies: you can operate hundreds or thousands of isolated Kubernetes clusters on shared infrastructure, all while maintaining strong separation and reliability.

At the heart of this approach is Kamaji’s commitment to upstream compatibility. The control plane components—`kube-apiserver`, `kube-scheduler`, and `kube-controller-manager`—are the same as those used in any CNCF-compliant Kubernetes cluster. Kamaji uses `kubeadm` for setup and lifecycle management, so you get the benefits of a standard, certified Kubernetes experience.

## How It Works

When you want to create a new Tenant Cluster, you simply define a `TenantControlPlane` resource in the Management Cluster. Kamaji’s controllers take over from there, deploying the necessary control plane pods, configuring networking, and connecting to the appropriate datastore. The control plane is exposed via a Kubernetes Service—by default as a `LoadBalancer`, but you can also use `NodePort` or `ClusterIP` depending on your needs.
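
A minimal sketch of such a resource follows; only a handful of fields are shown, and the values are illustrative (see the getting-started guides for complete, production-oriented examples):

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: default
spec:
  dataStore: default             # the DataStore persisting this cluster's state
  controlPlane:
    deployment:
      replicas: 2                # control plane pods managed by a Deployment
    service:
      serviceType: LoadBalancer  # or NodePort / ClusterIP
  kubernetes:
    version: v1.32.2
  addons:
    coreDNS: {}
    kubeProxy: {}
    konnectivity: {}             # enabled with default settings
```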

Worker nodes, whether virtual machines or bare metal, join the Tenant Cluster by connecting to its control plane endpoint. This process is compatible with standard Kubernetes tools and can be automated using Cluster API or other infrastructure automation solutions.

## Highlights

- **Efficiency and Scale:**
  By running control planes as pods, Kamaji reduces the infrastructure and operational overhead of managing many clusters.

- **High Availability and Automation:**
  Control plane pods are managed by Kubernetes Deployments, enabling rolling updates, self-healing, and autoscaling. Kamaji automates the entire lifecycle, from creation to deletion.

- **Declarative and GitOps:**
  The `TenantControlPlane` custom resource allows you to manage clusters declaratively, fitting perfectly with GitOps and Infrastructure as Code workflows.

- **Seamless Integration:**
  Kamaji works with Cluster API, supports a variety of datastores, and is compatible with the full Kubernetes ecosystem.

Kamaji’s Tenant Control Plane model is designed for organizations that need to deliver robust, production-grade Kubernetes clusters at scale—whether for internal platform engineering, managed services, or multi-tenant environments.
53
docs/content/concepts/tenant-worker-nodes.md
Normal file
@@ -0,0 +1,53 @@

# Tenant Worker Nodes

While Kamaji innovates in how control planes are managed, Tenant Worker Nodes remain true to their Kubernetes roots: they are regular virtual machines or bare metal servers that run your workloads. What makes them special in Kamaji's architecture is how they integrate with the containerized control planes and how they can be managed at scale across diverse infrastructure environments.

## Understanding Worker Nodes in Kamaji

In a Kamaji managed cluster, worker nodes connect to their Tenant Control Plane just as they would in a traditional Kubernetes setup. The key difference is that the control plane they're connecting to runs as pods within the Management Cluster, rather than on dedicated machines. This architectural choice maintains compatibility with existing tools and workflows while enabling more efficient resource utilization.

Each worker node belongs to exactly one Tenant Cluster and runs only that tenant's workloads. This clear separation ensures strong isolation between different tenants' applications and data, making Kamaji suitable for multi-tenant environments.

## Infrastructure Flexibility

Your worker nodes can run:

- On bare metal servers in a data center
- As virtual machines in private clouds
- On public cloud instances
- At edge locations
- In hybrid or multi-cloud configurations

This flexibility allows you to place workloads where they make the most sense for your use case, whether that's close to users, near data sources, or in specific regulatory environments.

## Lifecycle Management Options

Kamaji supports multiple approaches to managing worker node lifecycles:

### Manual Management

For simple setups or specific requirements, you can join worker nodes to their Tenant Clusters using standard `kubeadm` commands, as sketched below. This process is familiar to Kubernetes administrators and works just as it would with traditionally deployed clusters.

!!! tip "yaki"
    See the [yaki](https://goyaki.clastix.io/) script, which you can modify for your preferred operating system and version. The provided script is just a facility: it assumes all worker nodes are running `Ubuntu`. Make sure to adapt the script if you're using a different OS distribution.
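
The join itself is plain `kubeadm`. A hedged sketch, assuming a bootstrap token and the CA hash have already been generated against the Tenant Control Plane (all values below are placeholders):

```bash
# On each worker node: join the Tenant Control Plane endpoint
sudo kubeadm join 192.168.32.240:6443 \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash-of-the-tenant-cluster-ca>
```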

### Automation Tools

You can use standard infrastructure automation tools to manage worker nodes:

- Terraform for infrastructure provisioning
- Ansible for configuration management

### Cluster API Integration

For more sophisticated automation, Kamaji provides a [Cluster API Control Plane Provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).

This integration enables:

- Declarative management of both tenant control planes and tenant worker nodes
- Automated scaling and updates
- Integration with infrastructure providers for major cloud platforms
- Consistent management across different environments

---

Kamaji's approach to worker nodes combines the familiarity of traditional Kubernetes with the flexibility to run anywhere and the ability to manage at scale. Whether you're building a private cloud platform, offering Kubernetes as a service, or managing edge computing infrastructure, Kamaji provides the tools and patterns you need.
@@ -31,7 +31,8 @@ Following is the list of supported Ingress Controllers:

- [HAProxy Technologies Kubernetes Ingress](https://github.com/haproxytech/kubernetes-ingress)

> Active subscribers can request additional Ingress Controller flavours
!!! info "Other Ingress Controllers"
    Active subscribers can request support for additional Ingress Controller flavours.

## How to enable the Addon

@@ -89,9 +90,7 @@ spec:
```

The pattern for the generated hosts is the following:
`${tcp.namespace}-${tcp.name}.{k8s|konnectivity}.${ADDON_ANNOTATION_VALUE}`

> Please, notice the `konnectivity` rule will be created only if the `konnectivity` addon has been enabled.
`${tcp.namespace}-${tcp.name}.{k8s|konnectivity}.${ADDON_ANNOTATION_VALUE}`. For example, a Tenant Control Plane named `tenant-00` in the `default` namespace with annotation value `clastix.io` gets the host `default-tenant-00.k8s.clastix.io`. Please notice the `konnectivity` rule will be created only if the `konnectivity` addon has been enabled.

## Infrastructure requirements

@@ -142,7 +141,8 @@ spec:
The `ingressClassName` value must match a non-handled `IngressClass` object;
the addon will take care of generating the correct object.

> Nota Bene: the `hostname` must absolutely point to the 443 port
!!! warning "Use the right port"
    The `hostname` field must absolutely point to the 443 port!

### Kubernetes components extra Arguments

16
docs/content/getting-started/index.md
Normal file
@@ -0,0 +1,16 @@

# Getting started

This section describes how to get started with Kamaji on different environments:

!!! success "Slow Start"
    The material provided in this section is intended to be a slow start to Kamaji.

    It is intended to be a deep, hands-on learning experience, helping you get started with Kamaji while understanding the components involved and the core concepts behind it. We do not provide any "one-click" deployment here.

- [Getting started with Kamaji on Kind](./kamaji-kind.md)
- [Getting started with Kamaji on generic infra](./kamaji-generic.md)
- [Getting started with Kamaji on EKS](./kamaji-aws.md)
- [Getting started with Kamaji on AKS](./kamaji-azure.md)
415
docs/content/getting-started/kamaji-aws.md
Normal file
@@ -0,0 +1,415 @@

# Kamaji on AWS

This guide will lead you through the process of creating a working Kamaji setup on AWS.

The guide requires:

- a bootstrap machine
- a Kubernetes cluster (EKS) to run the Management and Tenant Control Planes
- an arbitrary number of machines to host Tenant workloads.

## Summary

* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Management Cluster](#access-management-cluster)
* [Install Kamaji](#install-kamaji)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)

## Prepare the bootstrap workspace

On the bootstrap machine, clone the repo and prepare the workspace directory:

```bash
git clone https://github.com/clastix/kamaji
cd kamaji/deploy
```

We assume the following tools are installed on the bootstrap machine:

- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
- [eksctl](https://eksctl.io/installation/)
- [clusterawsadm](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases)

Make sure you have a valid AWS account, and log in to AWS:

```bash
aws configure
```

## Access Management cluster

In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant clusters and implements monitoring, logging, and governance of all the Kamaji setups, including all Tenant Clusters. For this guide, we're going to use an instance of Amazon Elastic Kubernetes Service (EKS) as the Management Cluster.

Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own AWS environment:

### Create EKS cluster

In order to quickly create an EKS cluster, we will use `eksctl`, a simple CLI tool provided by AWS for creating and managing clusters on EKS.

`eksctl` will provision for you:

- A dedicated VPC on the `192.168.0.0/16` CIDR
- 3 private subnets and 3 public subnets in 3 different availability zones
- A NAT gateway for the private subnets and an internet gateway for the public ones
- The required route tables to associate the subnets with the IGW and the NAT gateways
- The EKS cluster itself
- Worker nodes associated with your cluster
- Optionally, the required IAM policies for your addons, attached to the nodes
- Optionally, the EKS add-ons installed into your cluster

For our use case, we will create an EKS cluster with the following configuration:

```bash
source kamaji-aws.env

cat > eks-cluster.yaml <<EOF
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig

metadata:
  name: ${KAMAJI_CLUSTER}
  region: ${KAMAJI_REGION}
  version: ${KAMAJI_CLUSTER_VERSION}
iam:
  withOIDC: true
vpc:
  clusterEndpoints:
    privateAccess: true
    publicAccess: true
managedNodeGroups:
  - name: ${KAMAJI_NODE_NG}
    labels: { role: workers }
    instanceType: ${KAMAJI_NODE_TYPE}
    desiredCapacity: 1
    privateNetworking: true
    availabilityZones: [${KAMAJI_AZ}]
    iam:
      withAddonPolicies:
        certManager: true
        ebs: true
        externalDNS: true
addons:
  - name: aws-ebs-csi-driver
EOF

eksctl create cluster -f eks-cluster.yaml
```

Please note:

- The `aws-ebs-csi-driver` addon is required to use EBS volumes as persistent volumes. This will be mainly used to store the tenant control plane data using the _default_ `etcd` DataStore.
- We created a node group with 1 node in one availability zone to simplify the setup.
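
The `kamaji-aws.env` file sourced above is not listed in this guide. A hypothetical example is shown below; every variable name is taken from the commands in this page, while the values are illustrative and must be adjusted to your environment:

```bash
# kamaji-aws.env -- illustrative values only
export KAMAJI_REGION="eu-west-1"
export KAMAJI_AZ="eu-west-1a"
export KAMAJI_CLUSTER="kamaji"
export KAMAJI_CLUSTER_VERSION="1.30"
export KAMAJI_NODE_NG="kamaji-workers"
export KAMAJI_NODE_TYPE="m5.large"
export KAMAJI_VPC_NAME="eksctl-kamaji-cluster/VPC"
export KAMAJI_PUBLIC_SUBNET_NAME="eksctl-kamaji-cluster/SubnetPublic*"
export KAMAJI_PRIVATE_SUBNET_NAME="eksctl-kamaji-cluster/SubnetPrivate*"
export TENANT_NAMESPACE="default"
export TENANT_NAME="tenant-00"
export TENANT_DOMAIN="clastix.io"
export TENANT_VERSION="v1.30.2"
export TENANT_PORT="6443"
export TENANT_PROXY_PORT="8132"
export TENANT_POD_CIDR="10.36.0.0/16"
export TENANT_SVC_CIDR="10.96.0.0/16"
export TENANT_DNS_SERVICE="10.96.0.10"
```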

### Access the management cluster

Update your `kubeconfig` and check you can access the cluster:

```bash
aws eks update-kubeconfig --region ${KAMAJI_REGION} --name ${KAMAJI_CLUSTER}
kubectl cluster-info
# make EBS the default storage class
kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```

### Add Route 53 domain

In order to easily access tenant clusters, it is recommended to create a Route 53 hosted zone, or use an existing one:

```bash
# for use within the VPC (requires KAMAJI_VPC_ID, resolved from the VPC name as shown later in this guide)
aws route53 create-hosted-zone --name "$TENANT_DOMAIN" --caller-reference $(date +%s) --vpc "VPCRegion=$KAMAJI_REGION,VPCId=$KAMAJI_VPC_ID"
```

## Install Kamaji

Follow the [Getting Started](kamaji-generic.md) guide to install Cert Manager and the Kamaji Controller.

### Install Cert Manager

Kamaji takes advantage of [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be installed:

```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
  cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --create-namespace \
  --version v1.11.0 \
  --set installCRDs=true
```

### Install ExternalDNS (optional)

ExternalDNS dynamically updates your DNS records based on annotations you add to services within EKS. Run the following commands to install the ExternalDNS Helm chart:

```bash
helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/
helm repo update
helm install external-dns external-dns/external-dns \
  --namespace external-dns \
  --create-namespace \
  --version 1.15.1
```
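
With the default values the chart still needs to be told which provider and zones to manage. A hedged sketch of the relevant settings (option names follow the external-dns chart; verify them against the chart version you install):

```bash
helm upgrade external-dns external-dns/external-dns \
  --namespace external-dns \
  --reuse-values \
  --set provider.name=aws \
  --set "domainFilters[0]=${TENANT_DOMAIN}"
```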

### Install Kamaji Controller

Installing Kamaji via Helm charts is the preferred way. Run the following commands to install the latest edge release of Kamaji:

```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --version 0.0.0+latest
```

## Create Tenant Cluster

Now that our management cluster is up and running, we can create a Tenant Cluster. A Tenant Cluster is a Kubernetes cluster that is managed by Kamaji.

### Tenant Control Plane

A tenant cluster is made of a `Tenant Control Plane` and an arbitrary number of worker nodes hosting the Tenant's workloads. The `Tenant Control Plane` is a Kubernetes control plane managed by Kamaji.

Before creating a Tenant Control Plane, you need to define some variables:

```bash
export KAMAJI_VPC_ID=$(aws ec2 describe-vpcs --filters "Name=tag:Name,Values=$KAMAJI_VPC_NAME" --query "Vpcs[0].VpcId" --output text)
export KAMAJI_PUBLIC_SUBNET_ID=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filters "Name=tag:Name,Values=$KAMAJI_PUBLIC_SUBNET_NAME" --query "Subnets[0].SubnetId" --output text)
export TENANT_EIP_ID=$(aws ec2 allocate-address --query 'AllocationId' --output text)
export TENANT_PUBLIC_IP=$(aws ec2 describe-addresses --allocation-ids $TENANT_EIP_ID --query 'Addresses[0].PublicIp' --output text)
```

In the next step, we will create a Tenant Control Plane with the following configuration:

```bash
cat > ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: ${TENANT_NAMESPACE}
---
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: ${TENANT_NAME}
  namespace: ${TENANT_NAMESPACE}
  labels:
    tenant.clastix.io: ${TENANT_NAME}
spec:
  dataStore: default
  controlPlane:
    deployment:
      replicas: 1
      nodeSelector:
        topology.kubernetes.io/zone: ${KAMAJI_AZ}
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
      extraArgs:
        apiServer: []
        controllerManager: []
        scheduler: []
      resources:
        apiServer:
          requests:
            cpu: 250m
            memory: 512Mi
          limits: {}
        controllerManager:
          requests:
            cpu: 125m
            memory: 256Mi
          limits: {}
        scheduler:
          requests:
            cpu: 125m
            memory: 256Mi
          limits: {}
    service:
      additionalMetadata:
        labels:
          tenant.clastix.io: ${TENANT_NAME}
        annotations:
          service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
          service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
          service.beta.kubernetes.io/aws-load-balancer-subnets: ${KAMAJI_PUBLIC_SUBNET_ID}
          service.beta.kubernetes.io/aws-load-balancer-eip-allocations: ${TENANT_EIP_ID}
          service.beta.kubernetes.io/aws-load-balancer-type: nlb
          external-dns.alpha.kubernetes.io/hostname: ${TENANT_NAME}.${TENANT_DOMAIN}
      serviceType: LoadBalancer
  kubernetes:
    version: ${TENANT_VERSION}
    kubelet:
      cgroupfs: systemd
    admissionControllers:
      - ResourceQuota
      - LimitRanger
  networkProfile:
    address: ${TENANT_PUBLIC_IP}
    port: ${TENANT_PORT}
    certSANs:
      - ${TENANT_NAME}.${TENANT_DOMAIN}
    serviceCidr: ${TENANT_SVC_CIDR}
    podCidr: ${TENANT_POD_CIDR}
    dnsServiceIPs:
      - ${TENANT_DNS_SERVICE}
  addons:
    coreDNS: {}
    kubeProxy: {}
    konnectivity:
      server:
        port: ${TENANT_PROXY_PORT}
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits: {}
EOF

kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}.yaml
```

Make sure:

- The Tenant Control Plane will expose the API server using a public IP address through a Network Load Balancer. It is important to provide a static public IP address for the API server in order to make it reachable from the outside world.

- The annotation `external-dns.alpha.kubernetes.io/hostname` is set to create the DNS record. It tells AWS to expose the Tenant Control Plane with a public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.

Since the AWS Load Balancer does not support setting `LoadBalancerIP`, you will get the following warning on the service created for the tenant control plane: `Error syncing load balancer: failed to ensure load balancer: LoadBalancerIP cannot be specified for AWS ELB`. You can ignore it for now.

### Working with Tenant Control Plane

Check the access to the Tenant Control Plane:

```bash
curl -k https://${TENANT_PUBLIC_IP}:${TENANT_PORT}/version
curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT}/healthz
curl -k https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT}/version
```

!!! warning "Using Private Domains"
    If the domain you used is a private __Route 53__ domain, make sure to map the public IP of the LoadBalancer to `${TENANT_NAME}.${TENANT_DOMAIN}` in your `/etc/hosts`. Otherwise, `kubectl` will fail to check SSL certificates.

Let's retrieve the `kubeconfig` in order to work with it:

```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
  | jq -r '.data["admin.conf"]' \
  | base64 --decode \
  > ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig

kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \
  set-cluster ${TENANT_NAME} \
  --server https://${TENANT_NAME}.${TENANT_DOMAIN}:${TENANT_PORT}
```

and let's check it out:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc

NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   38h
```

Check out how the Tenant Control Plane advertises itself:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get ep

NAME         ENDPOINTS          AGE
kubernetes   13.37.33.12:6443   3m22s
```

### Join worker nodes

The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane.

Kamaji does not provide any helper for the creation of tenant worker nodes; instead, it leverages [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for AWS is available: check the [official documentation](https://github.com/clastix/cluster-api-control-plane-provider-kamaji/blob/master/docs/providers-aws.md).

An alternative approach to creating and joining worker nodes in AWS is to manually create the VMs, turn them into Kubernetes worker nodes, and then join them through the `kubeadm` command.

### Generate kubeadm join command

To join the worker nodes to the Tenant Control Plane, you need to generate the `kubeadm join` command from the Management cluster:

```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r '.spec.loadBalancerIP')
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --ttl 0 --print-join-command | cut -d" " -f4-)
```

!!! tip "Token expiration"
    Setting `--ttl=0` on the `kubeadm token create` command guarantees that the token will never expire and can be reused. It's not intended for production-grade setups.

### Create tenant worker nodes

In this section, we will use the AMIs provided by CAPA (Cluster API Provider AWS) to create the worker nodes. Those AMIs are built using [image-builder](https://github.com/kubernetes-sigs/image-builder/tree/main) and contain all the necessary components to join the cluster.

```bash
export KAMAJI_PRIVATE_SUBNET_ID=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$KAMAJI_VPC_ID" --filters "Name=tag:Name,Values=$KAMAJI_PRIVATE_SUBNET_NAME" --query "Subnets[0].SubnetId" --output text)
export WORKER_AMI=$(clusterawsadm ami list --kubernetes-version=$TENANT_VERSION --os=ubuntu-24.04 --region=$KAMAJI_REGION -o json | jq -r .items[0].spec.imageID)

cat <<EOF > worker-user-data.sh
#!/bin/bash
$JOIN_CMD
EOF

aws ec2 run-instances --image-id $WORKER_AMI --instance-type "t2.medium" --user-data $(cat worker-user-data.sh | base64 -w0) --network-interfaces '{"SubnetId":"'"${KAMAJI_PRIVATE_SUBNET_ID}"'","AssociatePublicIpAddress":false,"DeviceIndex":0,"Groups":["<REPLACE_WITH_SG>"]}' --count "1"
```

We used user data to run the `kubeadm join` command at instance boot. This makes sure that the worker node joins the cluster automatically.

Make sure to replace `<REPLACE_WITH_SG>` with the ID of a security group that allows the worker nodes to communicate with the public IP of the tenant control plane.

Check the nodes in the Tenant Cluster:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME                STATUS     ROLES    AGE   VERSION
ip-192-168-153-94   NotReady   <none>   56m   v1.30.2
```

The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [Calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your choice.

Download the latest stable Calico manifest:

```bash
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```

As per the [documentation](https://projectcalico.docs.tigera.io/reference/public-cloud/AWS), Calico in VXLAN mode is supported on AWS, while IPIP packets are blocked by the AWS network fabric. Make sure you edit the manifest above and set the following variables:

- `CLUSTER_TYPE="k8s"`
- `CALICO_IPV4POOL_IPIP="Never"`
- `CALICO_IPV4POOL_VXLAN="Always"`
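
These are environment variables of the `calico-node` container inside `calico.yaml`. A hedged, non-interactive way to set them (assuming the v3.24.1 manifest defaults of `"k8s,bgp"`, `"Always"`, and `"Never"` respectively; review the resulting file before applying):

```bash
# Rewrite the three calico-node environment variables in place
sed -i \
  -e 's/value: "k8s,bgp"/value: "k8s"/' \
  -e '/CALICO_IPV4POOL_IPIP/{n;s/value: "Always"/value: "Never"/;}' \
  -e '/CALICO_IPV4POOL_VXLAN/{n;s/value: "Never"/value: "Always"/;}' \
  calico.yaml
```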

Apply to the Tenant Cluster:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```

And after a while, the nodes will be ready:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME                STATUS   ROLES    AGE   VERSION
ip-192-168-153-94   Ready    <none>   59m   v1.30.2
```

## Cleanup

To get rid of the whole Kamaji infrastructure, remove the EKS cluster:

```bash
eksctl delete cluster -f eks-cluster.yaml
```

That's all folks!
@@ -1,14 +1,11 @@
# Setup Kamaji on Azure
# Kamaji on Azure
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.

!!! warning ""
    The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.

The guide requires:

- a bootstrap machine
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
- a Kubernetes cluster (AKS) to run the Management and Tenant Control Planes
- an arbitrary number of machines to host Tenant workloads.

## Summary

@@ -98,7 +95,7 @@ kubectl cluster-info

## Install Kamaji

Follow the [Getting Started](../getting-started.md) to install Cert Manager and the Kamaji Controller.
Follow the [Getting Started](kamaji-generic.md) guide to install Cert Manager and the Kamaji Controller.

## Create Tenant Cluster

@@ -1,14 +1,11 @@
# Getting started with Kamaji
# Kamaji on generic infra
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure.

!!! warning ""
    The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.

The guide requires:

- a bootstrap machine
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads
- a Kubernetes cluster to run the Management and Tenant Control Planes
- an arbitrary number of machines to host Tenant workloads.

## Summary

@@ -43,11 +40,11 @@ Throughout the following instructions, shell variables are used to indicate valu
source kamaji.env
```

Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the Management Clusterr should provide:
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the Management Cluster should provide:

- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or a Cloud based controller.
- CSI module installed with a Storage Class for the Tenant datastores. The [Local Path Provisioner](https://github.com/rancher/local-path-provisioner) is a suggested choice, even for production environments.
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.io/), or cloud based.
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).

Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into a Kamaji Management Cluster and check you can access:
@@ -67,29 +64,25 @@ helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.11.0 \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
## Install Kamaji Controller
|
||||
|
||||
Installing Kamaji via Helm charts is the preferred way. Run the following commands to install a stable release of Kamaji:
|
||||
|
||||
```bash
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
```
|
||||
Installing Kamaji via Helm charts is the preferred way to deploy the Kamaji controller.
|
||||
The Helm chart is available in the `charts` directory of the Kamaji repository, or from the `clastix` Helm repository as the chart version `0.0.0+latest`.
|
||||
|
||||
!!! info "Stable Releases"
|
||||
As of July 2024 [Clastix Labs](https://github.com/clastix) does no longer publish stable release artifacts. Stable releases are offered on a subscription basis by [CLASTIX](https://clastix.io), the main Kamaji project contributor.
|
||||
As of July 2024, [Clastix Labs](https://github.com/clastix) no longer publishes version-pinned release artifacts.
|
||||
Version-pinned and stable releases are offered on a subscription basis by [CLASTIX](https://clastix.io), the main Kamaji project contributor.
|
||||
|
||||
Run the following commands to install latest edge release of Kamaji:
|
||||
Run the following commands to install the latest edge release of Kamaji:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/clastix/kamaji
|
||||
cd kamaji
|
||||
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
|
||||
helm install kamaji clastix/kamaji \
|
||||
--version 0.0.0+latest \
|
||||
--namespace kamaji-system \
|
||||
--create-namespace \
|
||||
--set image.tag=latest
|
||||
```
|
||||
|
||||
@@ -140,22 +133,7 @@ spec:
|
||||
apiServer: []
|
||||
controllerManager: []
|
||||
scheduler: []
|
||||
resources:
|
||||
apiServer:
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 512Mi
|
||||
limits: {}
|
||||
controllerManager:
|
||||
requests:
|
||||
cpu: 125m
|
||||
memory: 256Mi
|
||||
limits: {}
|
||||
scheduler:
|
||||
requests:
|
||||
cpu: 125m
|
||||
memory: 256Mi
|
||||
limits: {}
|
||||
resources: {}
|
||||
service:
|
||||
additionalMetadata:
|
||||
labels:
|
||||
@@ -182,11 +160,9 @@ spec:
|
||||
konnectivity:
|
||||
server:
|
||||
port: ${TENANT_PROXY_PORT}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
limits: {}
|
||||
resources: {}
|
||||
client:
|
||||
resources: {}
|
||||
EOF
|
||||
|
||||
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
|
||||
@@ -198,7 +174,7 @@ After a few seconds, check the created resources in the tenants namespace and wh
|
||||
kubectl -n ${TENANT_NAMESPACE} get tcp,deploy,pods,svc
|
||||
|
||||
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
|
||||
tenantcontrolplane/tenant-00 v1.25.2 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig default 2m20s
|
||||
tenantcontrolplane/tenant-00 v1.32.2 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig default 2m20s
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/tenant-00 3/3 3 3 118s
|
||||
@@ -214,7 +190,40 @@ service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/T
|
||||
|
||||
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the Management Cluster.
|
||||
|
||||
The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP` acting as `ControlPlaneEndpoint` for the worker nodes and other clients as, for example, `kubectl`. Service types `NodePort` and `ClusterIP` are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconcilied by the Kamaji controller.
|
||||
The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP` acting as `ControlPlaneEndpoint` for the worker nodes and other clients as, for example, `kubectl`. Service types `NodePort` and `ClusterIP` are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
|
||||
|
||||
### Assign a Specific Address to the Tenant Control Plane
|
||||
|
||||
When a Tenant Control Plane is created, Kamaji waits for the LoadBalancer to provide an address, which it then assigns to the `ControlPlaneEndpoint` field of the Tenant Control Plane. This address is crucial as it allows worker nodes and tenant users to access the Tenant Control Plane. By default, the LoadBalancer controller in your management cluster dynamically selects this address and passes it to Kamaji through the `Service` resource.
|
||||
|
||||
If you need to use a specific address for your Tenant Control Plane, you can specify it by setting the `tcp.spec.networkProfile.address` field in the Tenant Control Plane manifest. This optional field ensures that Kamaji uses your preferred address. However, if the specified address is unavailable, the Tenant Control Plane will remain in a `NotReady` state until the address becomes available.
|
||||
|
||||
To ensure that the LoadBalancer controller uses your specified address for the Service, you'll need to use controller-specific annotations. For instance, if you're using MetalLB as your LoadBalancer controller, you can add the `metallb.io/loadBalancerIPs` annotation to your Service definition, allowing the LoadBalancer controller to select the specified address:
|
||||
|
||||
```yaml
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: sample-tcp
|
||||
labels:
|
||||
tenant.clastix.io: sample-tcp
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
replicas: 2
|
||||
service:
|
||||
serviceType: LoadBalancer
|
||||
additionalMetadata:
|
||||
annotations:
|
||||
metallb.io/loadBalancerIPs: 172.18.255.104 # use this address
|
||||
kubernetes:
|
||||
version: "v1.30.0"
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
networkProfile:
|
||||
address: 172.18.255.104 # use this address
|
||||
port: 6443
|
||||
```
|
||||
|
||||
### Working with Tenant Control Plane
|
||||
|
||||
@@ -274,7 +283,7 @@ The Tenant Control Plane is made of pods running in the Kamaji Management Cluste
|
||||
!!! warning "Opening Ports"
|
||||
To make sure worker nodes can join the Tenant Control Plane, you must allow incoming connections to: `${TENANT_ADDR}:${TENANT_PORT}` and `${TENANT_ADDR}:${TENANT_PROXY_PORT}`
|
||||
|
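As a minimal sketch, assuming the Management Cluster nodes run a host firewall managed with `ufw` (adapt the commands to your firewall or cloud security groups), the two ports can be opened as follows:

```bash
# Allow worker nodes and clients to reach the Tenant Control Plane endpoint
sudo ufw allow ${TENANT_PORT}/tcp
# Allow worker nodes to reach the konnectivity proxy endpoint
sudo ufw allow ${TENANT_PROXY_PORT}/tcp
```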
||||
Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Refer to the [Cluster API guide](guides/cluster-api.md) to learn more about supported providers.
|
||||
Kamaji does not provide any helper for creation of tenant worker nodes, instead it leverages the [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Refer to the section [Cluster API](../cluster-api/index.md) to learn more about Cluster API support in Kamaji.
|
||||
|
||||
An alternative approach for joining nodes is to use the `kubeadm` command on each node. Follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to:
|
||||
|
||||
@@ -308,7 +317,7 @@ done
|
||||
```
|
||||
|
||||
!!! tip "yaki"
|
||||
This manual process can be further automated to handle the node prerequisites and joining. See [yaki](https://github.com/clastix/yaki) script, which you could modify for your preferred operating system and version. The provided script is just a facility: it assumes all worker nodes are running `Ubuntu 22.04`. Make sure to adapt the script if you're using a different distribution.
|
||||
This manual process can be further automated to handle the node prerequisites and joining. See [yaki](https://goyaki.clastix.io/) script, which you could modify for your preferred operating system and version. The provided script is just a facility: it assumes all worker nodes are running `Ubuntu`. Make sure to adapt the script if you're using a different OS distribution.
|
||||
|
||||
|
||||
Checking the nodes:
|
||||
166
docs/content/getting-started/kamaji-kind.md
Normal file
@@ -0,0 +1,166 @@
|
||||
# Kamaji on Kind
|
||||
This guide will lead you through the process of creating a working Kamaji setup using a Kind cluster. The guide requires the following installed on your workstation: `docker`, `kind`, `helm`, and `kubectl`.
|
||||
|
||||
!!! warning "Development Only"
|
||||
Run Kamaji on kind only for development or learning purposes.
|
||||
|
||||
Kamaji is designed to be run on production-grade Kubernetes clusters, such as those provided by cloud providers or on-premises solutions. Kind is not a production-grade Kubernetes cluster, and it is not recommended to run in production environments.
|
||||
|
||||
## Summary
|
||||
|
||||
* [Creating Kind Cluster](#creating-kind-cluster)
|
||||
* [Installing Cert-Manager](#installing-cert-manager)
|
||||
* [Installing MetalLB](#installing-metallb)
|
||||
* [Creating IP Address Pool](#creating-ip-address-pool)
|
||||
* [Installing Kamaji](#installing-kamaji)
|
||||
* [Creating Tenant Control Plane](#creating-tenant-control-plane)
|
||||
|
||||
|
||||
## Creating Kind Cluster
|
||||
|
||||
Create a kind cluster.
|
||||
```
|
||||
kind create cluster --name kamaji
|
||||
```
|
||||
|
||||
This will take a short while for the kind cluster to be created.
|
||||
|
||||
## Installing Cert-Manager
|
||||
|
||||
Kamaji has a dependency on Cert Manager: it uses dynamic admission control through validating and mutating webhook configurations, which are secured by TLS, and these certificates are managed by `cert-manager`. Hence, it needs to be installed first.
|
||||
|
||||
Add the Bitnami Repo to the Helm Manager.
|
||||
|
||||
```
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
Install Cert Manager using Helm
|
||||
|
||||
```
|
||||
helm upgrade --install cert-manager bitnami/cert-manager \
|
||||
--namespace certmanager-system \
|
||||
--create-namespace \
|
||||
--set "installCRDs=true"
|
||||
```
|
||||
|
||||
This will install cert-manager to the cluster. You can watch the progress of the installation on the cluster using the command
|
||||
|
||||
```
|
||||
kubectl get pods -Aw
|
||||
```
|
||||
|
||||
## Installing MetalLB
|
||||
|
||||
MetalLB is used to dynamically assign IP addresses to the components and to define custom IP Address Pools. Install MetalLB using the `kubectl` command to apply the manifest:
|
||||
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
|
||||
```
|
||||
|
||||
This will install MetalLB onto the cluster with all the necessary resources.
|
||||
|
||||
## Creating IP Address Pool
|
||||
|
||||
Extract the Gateway IP of the network Kind is running on.
|
||||
|
||||
```
|
||||
GW_IP=$(docker network inspect -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}' kind)
|
||||
```
|
||||
|
||||
Derive the network prefix, substitute it into the address range, and apply the resources to create the IP Address Pool:
|
||||
|
||||
```
|
||||
NET_IP=$(echo ${GW_IP} | sed -E 's|^([0-9]+\.[0-9]+)\..*$|\1|g')
|
||||
cat << EOF | sed -E "s|172.19|${NET_IP}|g" | kubectl apply -f -
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: kind-ip-pool
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
- 172.19.255.200-172.19.255.250
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: empty
|
||||
namespace: metallb-system
|
||||
EOF
|
||||
```
|
||||
|
||||
## Installing Kamaji
|
||||
- Add the Clastix Repo to the Helm Manager.
|
||||
|
||||
```
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
```
|
||||
|
||||
- Install Kamaji with Helm
|
||||
|
||||
```
|
||||
helm upgrade --install kamaji clastix/kamaji \
|
||||
--namespace kamaji-system \
|
||||
--create-namespace \
|
||||
--set 'resources=null' \
|
||||
--version 0.0.0+latest
|
||||
```
|
||||
|
||||
- Watch the progress of the deployments
|
||||
|
||||
```
|
||||
kubectl get pods -Aw
|
||||
```
|
||||
|
||||
- Verify by first checking Kamaji CRDs
|
||||
|
||||
```
|
||||
kubectl get crds | grep -i kamaji
|
||||
```
|
||||
|
||||
!!! Info "CSI Drivers"
|
||||
Kamaji requires a __storage provider__ installed on the management cluster. Kind provides `local-path-provisioner` by default, but you can install any other CSI driver.
|
||||
|
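You can verify that a default StorageClass is available before creating Tenant Control Planes:

```
kubectl get storageclass
```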
||||
## Creating Tenant Control Plane
|
||||
|
||||
- Create a Tenant Control Plane using the command
|
||||
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/clastix/kamaji/master/config/samples/kamaji_v1alpha1_tenantcontrolplane.yaml
|
||||
```
|
||||
|
||||
- Watch the progress of the Tenant Control Plane by
|
||||
|
||||
```
|
||||
kubectl get tcp -w
|
||||
```
|
||||
|
||||
- You can attempt to get the details of the control plane by downloading the `kubeconfig` file
|
||||
|
||||
```
|
||||
# Set the SECRET as KUBECONFIG column listed in the tcp output.
|
||||
SECRET=""
|
||||
kubectl get secret $SECRET -o jsonpath='{.data.admin\.conf}'|base64 -d > /tmp/kamaji.conf
|
||||
```
|
||||
|
||||
- (optional) if you run kind on systems where the `docker bridge network` is not directly reachable from the host, e.g. macOS, you may need to access the `kind` container and perform the `kubectl` actions from there:
|
||||
|
||||
```
|
||||
docker exec -it $(docker container list | grep kamaji-control-plane | awk '{print $1}') bash
|
||||
```
|
||||
|
||||
- Export the `kubeconfig` file to the environment variable `KUBECONFIG`
|
||||
|
||||
```
|
||||
export KUBECONFIG=/tmp/kamaji.conf
|
||||
```
|
||||
|
||||
- Notice that the `kubectl` version changes, and there are no nodes now.
|
||||
|
||||
```
|
||||
kubectl version
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
A video tutorial of the [demonstration](https://www.youtube.com/watch?v=hDTvnOyUmo4&t=577s) is also available.
|
||||
@@ -1,15 +1,10 @@
|
||||
# Use Alternative Datastores
|
||||
# Alternative Datastores
|
||||
|
||||
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
## Installing Drivers
|
||||
|
||||
The following `make` recipes help you to setup alternative `Datastore` resources.
|
||||
|
||||
> The default settings are not production grade:
|
||||
> the following scripts are just used to test the Kamaji usage of different drivers.
|
||||
|
||||
On the Management Cluster, you can use the following commands:
|
||||
The following `make` recipes help you set up alternative `Datastore` resources. On the Management Cluster, you can use the following commands:
|
||||
|
||||
- **MySQL**: `$ make -C deploy/kine/mysql mariadb`
|
||||
|
||||
@@ -17,6 +12,9 @@ On the Management Cluster, you can use the following commands:
|
||||
|
||||
- **NATS**: `$ make -C deploy/kine/nats nats`
|
||||
|
||||
!!! warning "Not for production"
|
||||
The default settings are not production grade: the following scripts are just used to test the Kamaji usage of different drivers.
|
||||
|
||||
## Defining a default Datastore upon Kamaji installation
|
||||
|
||||
Use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL|NATS>`.
|
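For example, a minimal installation sketch selecting PostgreSQL as the default datastore driver (all other chart values are left at their defaults):

```bash
helm upgrade --install kamaji clastix/kamaji \
  --namespace kamaji-system \
  --create-namespace \
  --set datastore.driver=PostgreSQL
```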
||||
@@ -62,6 +60,4 @@ When the said key is omitted, Kamaji will use the default datastore configured w
|
||||
|
||||
The NATS support is still experimental, mostly because multi-tenancy is **NOT** supported.
|
||||
|
||||
> A `NATS` based DataStore can host one and only one Tenant Control Plane.
|
||||
> When a `TenantControlPlane` is referring to a NATS `DataStore` already used by another instance,
|
||||
> reconciliation will fail and blocked.
|
||||
A `NATS` based DataStore can host one and only one Tenant Control Plane. When a `TenantControlPlane` refers to a NATS `DataStore` already used by another instance, reconciliation will fail and be blocked.
|
||||
|
||||
@@ -39,10 +39,10 @@ velero backup describe tenant-00
|
||||
|
||||
## Restore step
|
||||
|
||||
>_WARNING_: this procedure will restore just the TCP resource.
|
||||
In the event that the related datastore has been lost, you MUST restore it BEFORE continue; to do this, refer to the backup and restore strategy of the datastore of your choice.
|
||||
|
||||
---
|
||||
!!! warning "Restoring Datastore"
|
||||
This procedure will restore just the TCP resource.
|
||||
|
||||
In the event that the related datastore has been lost, you MUST restore it BEFORE continuing; to do this, refer to the backup and restore strategy of the datastore of your choice.
|
||||
|
||||
To restore just the desired TCP, simply execute:
|
||||
|
||||
|
||||
@@ -99,11 +99,10 @@ By default, the rotation will occur the day before their expiration.
|
||||
This rotation deadline can be dynamically configured with the Kamaji CLI flag `--certificate-expiration-deadline`, using the Go _Duration_ syntax:
|
||||
e.g.: set the value `7d` to trigger the renewal a week before the effective expiration date.
|
||||
|
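As a sketch, assuming Kamaji is installed with the Helm chart and extra controller flags are passed through the chart's `extraArgs` value, a one-week deadline (`168h` in Go duration syntax) could be set like this:

```bash
helm upgrade kamaji clastix/kamaji -n kamaji-system \
  --set extraArgs[0]=--certificate-expiration-deadline=168h
```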
||||
> Nota Bene:
|
||||
>
|
||||
> Kamaji is responsible for creating the `etcd` client certificate, and the generation of a new one will occur.
|
||||
> For other Datastore drivers, such as MySQL, PostgreSQL, or NATS, the referenced Secret will always be deleted by the Controller to trigger the rotation:
|
||||
> the PKI management, since it's offloaded externally, must provide the renewed certificates.
|
||||
!!! info "Other Datastore Drivers"
|
||||
Kamaji is responsible for creating the `etcd` client certificate, and a new one will be generated automatically.
|
||||
|
||||
For other Datastore drivers, such as MySQL, PostgreSQL, or NATS, the referenced Secret will always be deleted by the Controller to trigger the rotation: the PKI management, since it's offloaded externally, must provide the renewed certificates.
|
||||
|
||||
## Certificate Authority rotation
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
# Cluster APIs Support
|
||||
|
||||
The [Cluster API](https://github.com/kubernetes-sigs/cluster-api) brings declarative, Kubernetes-style APIs to creation of Kubernetes clusters, including configuration and management.
|
||||
|
||||
Kamaji offers seamless integration with the most popular Cluster API Infrastructure Providers. Check the currently supported providers and the roadmap on the related [repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
# Kamaji Console
|
||||
|
||||
This guide will introduce you to the basics of the Kamaji Console, a web UI to help you to view and control your Kamaji setup.
|
||||
|
||||
When you login to the console you are brought to the Tenant Control Planes, which allows you to quickly understand the state of your Kamaji setup at a glance. It shows summary information about all the Tenant Control Plane objects, including: name, namespace, status, endpoint, version, and datastore.
|
||||
|
||||

|
||||
|
||||
## Install with Helm
|
||||
The Kamaji Console is a web interface running on the Kamaji Management Cluster that you can install with Helm. Check the Helm Chart [documentation](https://github.com/clastix/kamaji-console) for all the available settings.
|
||||
|
||||
The Kamaji Console requires a Secret in the Kamaji Management Cluster that contains the configuration and credentials to access the console from the browser. You can have the Helm Chart generate it for you, or create it yourself and provide the name of the Secret during installation. Before to install the Kamaji Console, access your workstation, replace the placeholders with actual values, and execute the following command:
|
||||
The Kamaji Console requires a Secret in the Kamaji Management Cluster that contains the configuration and credentials to access the console from the browser. You can have the Helm Chart generate it for you, or create it yourself and provide the name of the Secret during installation.
|
||||
|
||||
Before installing the Kamaji Console, access your workstation, replace the placeholders with actual values, and execute the following command:
|
||||
|
||||
```bash
|
||||
# The secret is required, otherwise the installation will fail
|
||||
@@ -32,11 +39,6 @@ Install the Chart with the release name `console` in the `kamaji-system` namespa
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm -n kamaji-system install console clastix/kamaji-console
|
||||
```
|
||||
|
||||
Show the status:
|
||||
|
||||
```
|
||||
helm status console -n kamaji-system
|
||||
```
|
||||
|
||||
@@ -54,39 +56,13 @@ and point the browser to `http://127.0.0.1:8080/ui` to access the console. Login
|
||||
!!! note "Expose with Ingress"
|
||||
The Kamaji Console can be exposed with an ingress. Refer to the Helm Chart documentation on how to configure it properly.
|
||||
|
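A hypothetical sketch only: the value names below are assumptions and must be checked against the kamaji-console chart's `values.yaml` before use:

```bash
helm -n kamaji-system upgrade console clastix/kamaji-console \
  --set ingress.enabled=true \
  --set "ingress.hosts[0].host=console.example.com"
```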
||||
## Explore the Kamaji Console
|
||||
The Kamaji Console provides a high level view of all Tenant Control Planes configured in your Kamaji setup. When you login to the console you are brought to the Tenant Control Planes view, which allows you to quickly understand the state of your Kamaji setup at a glance. It shows summary information about all the Tenant Control Plane objects, including: name, namespace, status, endpoint, version, and datastore.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create a new Tenant Control Plane from a basic placeholder in yaml format:
|
||||
|
||||

|
||||
|
||||
### Working with Tenant Control Plane
|
||||
From the main view, clicking on a Tenant Control Plane row will bring you to the detailed view. This view shows you all the details about the selected Tenant Control Plane, including all child components: pods, deployment, service, config maps, and secrets. From this view, you can also view, copy, and download the `kubeconfig` to access the Tenant Control Plane as tenant admin.
|
||||
|
||||

|
||||
|
||||
### Working with Datastore
|
||||
|
||||
From the menu bar on the left, clicking on the Datastores item, you can access the list of provisioned Datastores.
|
||||
It shows a summary about datastores, including name and the used driver, i.e. `etcd`, `MySQL`, `PostgreSQL`, and `NATS`.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create, delete, edit, and inspect the single datastore.
|
||||
|
||||
### Additional Operations
|
||||
The Kamaji Console offers additional capabilities as part of the commercial edition Clastix Operating Platform:
|
||||
## Additional Operations
|
||||
The Kamaji Console offers additional capabilities unlocked by Clastix Enterprise Platform:
|
||||
|
||||
- Infrastructure Drivers Management
|
||||
- Applications Delivery via GitOps Operators
|
||||
- Applications Delivery
|
||||
- Centralized Authentication and Access Control
|
||||
- Auditing and Logging
|
||||
- Monitoring
|
||||
- Backup & Restore
|
||||
|
||||
!!! note "Ready for more?"
|
||||
To purchase entitlement to Clastix Operating Platform please contact hello@clastix.io.
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# General
|
||||
# Contribute
|
||||
|
||||
Thank you for your interest in contributing to Kamaji. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
|
||||
|
||||
@@ -160,7 +160,6 @@ tenant-00 v1.25.2 Ready 192.168.32.200:6443 tenant-00-admin-kubec
|
||||
|
||||
During the datastore migration, the Tenant Control Plane is put in read-only mode to avoid misalignments between source and destination datastores. If tenant users try to update the data, an admission controller denies the request with the following message:
|
||||
|
||||
|
||||
```shell
|
||||
Error from server (the current Control Plane is in freezing mode due to a maintenance mode,
|
||||
all the changes are blocked: removing the webhook may lead to an inconsistent state upon its completion):
|
||||
@@ -169,7 +168,16 @@ admission webhook "catchall.migrate.kamaji.clastix.io" denied the request
|
||||
|
||||
After a while, depending on the amount of data to migrate, the Tenant Control Plane is put back in full operating mode by the Kamaji controller.
|
||||
|
||||
> Please, note the datastore migration leaves the data on the default datastore, so you have to remove it manually.
|
||||
Migration is expected to complete within 5 minutes.
|
||||
However, that timeout can be customized at the `TenantControlPlane` level with the annotation `kamaji.clastix.io/migration-timeout` with a Go-duration value (e.g.: `5m`).
|
||||
|
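For example, a sketch of extending the timeout to fifteen minutes on an existing Tenant Control Plane (namespace and name are placeholders):

```shell
kubectl -n ${TENANT_NAMESPACE} annotate tcp ${TENANT_NAME} \
  kamaji.clastix.io/migration-timeout=15m --overwrite
```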
||||
!!! info "Leftover"
|
||||
Please, note the datastore migration leaves the data on the default datastore, so you have to remove it manually.
|
||||
|
||||
!!! info "Avoiding stale DataStore content"
|
||||
When migrating `TenantControlPlane` across DataStore, a collision with the __schema__ name could happen,
|
||||
leading to unexpected results such as old data still available.
|
||||
The annotation `kamaji.clastix.io/cleanup-prior-migration=true` enforces the clean-up of the target `DataStore` schema in case of collision.
|
||||
|
||||
## Post migration
|
||||
After migrating data to the new datastore, complete the migration procedure by restarting the `kubelet.service` on all the tenant worker nodes.
|
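On systemd-based worker nodes this is a plain service restart, to be run on each tenant worker node:

```shell
sudo systemctl restart kubelet.service
```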
||||
@@ -180,7 +188,7 @@ After migrating data to the new datastore, complete the migration procedure by r
|
||||
When migrating between datastores, the Kamaji controller automatically creates a migration job to transfer data from the source to the destination datastore. By default, this job uses the same image version as the running Kamaji controller. If you need to use a different image version for the migration job, you can specify it by passing extra arguments to the controller:
|
||||
|
||||
```shell
|
||||
helm upgrade kamaji clastix/kamaji -n kamaji-system
|
||||
helm upgrade kamaji clastix/kamaji --version ${CHART_VERSION} -n kamaji-system \
|
||||
  --set extraArgs[0]=--migrate-image=custom/kamaji:version
|
||||
```
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Manage Tenant Control Planes with GitOps
|
||||
# GitOps
|
||||
|
||||
This guide describes a declarative way to deploy Kubernetes add-ons across multiple Tenant Clusters, the GitOps way. An admin may need to apply a specific workload into Tenant Clusters and ensure it is constantly reconciled, no matter what the tenants do in their clusters. Examples include installing monitoring agents, ensuring specific policies, installing infrastructure operators like Cert Manager and so on.
|
||||
|
||||
@@ -27,7 +27,8 @@ NAME VERSION STATUS CONTROL-PLANE-ENDPOINT KUBECONFIG
|
||||
tenant1 v1.25.1 Ready 172.18.0.2:31443 tenant1-admin-kubeconfig 108s
|
||||
```
|
||||
|
||||
> As the *admin* user has *cluster-admin* `ClusterRole` it will have the necessary privileges to operate on Custom Resources too.
|
||||
!!! info "Admin Permissions"
|
||||
As the *admin* user has *cluster-admin* `ClusterRole` it will have the necessary privileges to operate on Custom Resources too.
|
||||
|
||||
Given that Flux is installed in the *Management Cluster* - guide [here](https://fluxcd.io/flux/installation/) - resources can be ensured for specific Tenant Clusters by filling the `spec.kubeConfig` field of the Flux reconciliation resource, as sketched below.
|
||||
|
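The following is an illustrative sketch (repository, path, and resource names are assumptions): a Flux `Kustomization` reconciled against a Tenant Cluster by referencing the tenant admin kubeconfig Secret generated by Kamaji:

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: tenant1-addons
  namespace: flux-system
spec:
  interval: 10m
  sourceRef:
    kind: GitRepository
    name: addons
  path: ./addons
  prune: true
  # Reconcile against the Tenant Cluster instead of the Management Cluster
  kubeConfig:
    secretRef:
      name: tenant1-admin-kubeconfig
      key: admin.conf
```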
||||
209
docs/content/guides/monitoring.md
Normal file
@@ -0,0 +1,209 @@
|
||||
# Tenant Control Plane Monitoring
|
||||
|
||||
Kamaji exposes a set of metrics that can be used to monitor the health of the Tenant Control Plane (TCP) and its components. The metrics are exposed in Prometheus format and can be scraped by a Prometheus server instance running in the Management Cluster.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
Ensure you have installed the [Prometheus Operator](https://prometheus.io/community/) in the Management Cluster and that it is configured properly. You should verify that Service Monitor CRDs are installed in the Management Cluster as they are used to tell Prometheus how to scrape the metrics from the TCP.
|
||||
|
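A quick way to verify the ServiceMonitor CRD is present on the Management Cluster:

```bash
kubectl get crd servicemonitors.monitoring.coreos.com
```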
||||
## Enable metrics scraping
|
||||
|
||||
On the Management Cluster, in the same namespace as the Tenant Control Plane, create a Service Monitor that instructs Prometheus how to scrape the metrics from the TCP.
|
||||
|
||||
First, create a service for exposing metric endpoints from TCP components. The following is an example for a Tenant Control Plane named `charlie` deployed in the `default` namespace:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
kamaji.clastix.io/name: charlie-metrics
|
||||
name: charlie-metrics
|
||||
namespace: default
|
||||
spec:
|
||||
ports:
|
||||
- name: kube-apiserver-metrics
|
||||
port: 6443
|
||||
protocol: TCP
|
||||
targetPort: 6443
|
||||
- name: kube-controller-manager-metrics
|
||||
port: 10257
|
||||
protocol: TCP
|
||||
targetPort: 10257
|
||||
- name: kube-scheduler-metrics
|
||||
port: 10259
|
||||
protocol: TCP
|
||||
targetPort: 10259
|
||||
selector:
|
||||
kamaji.clastix.io/name: charlie
|
||||
type: ClusterIP
|
||||
```
|
||||
|
||||
Then create a Service Monitor that tells Prometheus how to scrape the metrics from the TCP:
|
||||
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
release: kube-prometheus-stack
|
||||
name: kube-prometheus-stack-tcp-charlie
|
||||
namespace: default
|
||||
spec:
|
||||
endpoints:
|
||||
# API Server endpoint
|
||||
- port: kube-apiserver-metrics
|
||||
scheme: https
|
||||
path: /metrics
|
||||
interval: 15s
|
||||
scrapeTimeout: 10s
|
||||
tlsConfig:
|
||||
# skip certificate verification
|
||||
insecureSkipVerify: true
|
||||
# Client certificate for authentication
|
||||
cert:
|
||||
secret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.crt
|
||||
# Client key for authentication
|
||||
keySecret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.key
|
||||
metricRelabelings:
|
||||
- action: drop
|
||||
regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)
|
||||
sourceLabels:
|
||||
- __name__
|
||||
- le
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: charlie
|
||||
- action: replace
|
||||
targetLabel: job
|
||||
replacement: apiserver
|
||||
# Controller Manager endpoint
|
||||
- port: kube-controller-manager-metrics
|
||||
scheme: https
|
||||
path: /metrics
|
||||
interval: 15s
|
||||
scrapeTimeout: 10s
|
||||
tlsConfig:
|
||||
# skip certificate verification
|
||||
insecureSkipVerify: true
|
||||
# Client certificate for authentication
|
||||
cert:
|
||||
secret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.crt
|
||||
# Client key for authentication
|
||||
keySecret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.key
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: charlie
|
||||
- action: replace
|
||||
targetLabel: job
|
||||
replacement: kube-controller-manager
|
||||
# Scheduler endpoint
|
||||
- port: kube-scheduler-metrics
|
||||
scheme: https
|
||||
path: /metrics
|
||||
interval: 15s
|
||||
scrapeTimeout: 10s
|
||||
tlsConfig:
|
||||
# skip certificate verification
|
||||
insecureSkipVerify: true
|
||||
# Client certificate for authentication
|
||||
cert:
|
||||
secret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.crt
|
||||
# Client key for authentication
|
||||
keySecret:
|
||||
name: charlie-api-server-kubelet-client-certificate
|
||||
key: apiserver-kubelet-client.key
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: charlie
|
||||
- action: replace
|
||||
targetLabel: job
|
||||
replacement: kube-scheduler
|
||||
selector:
|
||||
matchLabels:
|
||||
kamaji.clastix.io/name: charlie-metrics
|
||||
```
|
||||
|
||||
!!! tip "TLS certificates"
|
||||
To access the metrics endpoints, Prometheus must authenticate against the control plane. You can use the `<tcp_name>-api-server-kubelet-client-certificate` secret: it is automatically created by Kamaji in the Tenant Control Plane namespace and contains the client certificate and key needed to authenticate to the control plane components.
|
||||
|
||||
Finally, ensure the Prometheus service account, e.g. `kube-prometheus-stack-prometheus`, has the necessary permissions to access the secrets containing the certificates. The following is an example of a `Role` and `RoleBinding` that grant the required permissions:
|
||||
|
||||
```yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: prometheus-secret-access
|
||||
namespace: default
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kube-prometheus-stack-prometheus
|
||||
namespace: monitoring-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: prometheus-secret-reader
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: prometheus-secret-reader
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
```
|
||||
|
||||
For production environments, a fine-grained approach is recommended to restrict access only to the secrets containing the required certificates.
|
||||
|
||||
## Accessing metrics
|
||||
|
||||
Scraped metrics are available in the Prometheus server. You can access the Prometheus dashboard to view the metrics and create alerts based on them. If you use the same Prometheus instance for monitoring both the Management Cluster and Tenant Control Planes, you must relabel the scraped metrics to differentiate between them. This can be achieved in the `values.yaml` file used to install the Prometheus Operator Helm Chart:
|
||||
|
||||
```yaml
|
||||
...
|
||||
prometheus:
|
||||
...
|
||||
kubeApiServer:
|
||||
serviceMonitor:
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: kamaji
|
||||
kubeControllerManager:
|
||||
serviceMonitor:
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: kamaji
|
||||
kubeScheduler:
|
||||
serviceMonitor:
|
||||
relabelings:
|
||||
- action: replace
|
||||
targetLabel: cluster
|
||||
replacement: kamaji
|
||||
...
|
||||
```
|
||||
|
||||
## Grafana
|
||||
|
||||
**Grafana** is a widely used tool for visualizing metrics. You can create custom dashboards for Tenant Control Planes and visualize the metrics scraped by Prometheus. The Prometheus Operator Helm Chart also installs Grafana with a set of predefined dashboards for Kubernetes Control Plane components: `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager`. These dashboards can serve as a starting point for creating custom dashboards for Tenant Control Planes or can be used as-is.
|
||||
|
||||
!!! tip "Multi-Cluster Mode"
|
||||
In Grafana, enable the "Multi-Cluster Mode" option for improved visualization of metrics. This option is available in the Grafana settings.
|
||||
|
||||
That's it!
|
||||
46
docs/content/guides/terraform.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# Terraform
|
||||
|
||||
While [Cluster API](https://github.com/kubernetes-sigs/cluster-api) is a common approach for managing Kubernetes infrastructure declaratively, there are situations where Cluster API may not be suitable or desired. This can occur for various reasons, such as:
|
||||
|
||||
- The need to keep control plane management separate from infrastructure management
|
||||
- When the infrastructure provider hosting worker nodes lacks native Cluster API support
|
||||
- Existing Terraform-based infrastructure workflows that need integration
|
||||
- Specific compliance or organizational requirements
|
||||
|
||||
In these scenarios, an alternative approach is to provision worker nodes using [`yaki`](https://goyaki.clastix.io/), a wrapper around the standard `kubeadm` utility developed and maintained by [Clastix Labs](https://github.com/clastix).
|
||||
|
||||
## How It Works
|
||||
|
||||
The workflow combines [Terraform](https://developer.hashicorp.com/terraform) for infrastructure provisioning with `yaki` for Kubernetes node bootstrapping:
|
||||
|
||||
1. **Terraform** provisions the virtual machines on your chosen infrastructure
|
||||
2. **`yaki`** installs all required Kubernetes dependencies on each machine
|
||||
3. **Bootstrap tokens** automatically join the machines to your Kamaji tenant control plane
|
||||
|
||||
## Terraform Modules
|
||||
|
||||
The [terraform-kamaji-node-pool](https://github.com/clastix/terraform-kamaji-node-pool) repository provides comprehensive Terraform modules for provisioning Kubernetes worker nodes across multiple cloud providers. The repository is structured to support various infrastructure providers with Terraform support, including:
|
||||
|
||||
- **AWS** - Auto Scaling Groups with automatic scaling
|
||||
- **Azure** - Virtual Machine Scale Sets *(planned)*
|
||||
- **vSphere** - Enterprise-grade virtual machines
|
||||
- **Proxmox** - Direct VM management on Proxmox VE
|
||||
- **vCloud** - Multi-tenant VMs on VMware Cloud Director
|
||||
|
||||
### Key Features
|
||||
|
||||
- **Multi-cloud support** with consistent interfaces across providers
|
||||
- **Automatic bootstrap token management** for secure cluster joining
|
||||
- **Shared cloud-init templates** for consistent node configuration
|
||||
- **Ready-to-use provider implementations** with example configurations
|
||||
- **Modular architecture** allowing custom integrations
|
||||
|
||||
### Getting Started
|
||||
|
||||
For detailed usage instructions, see the [project documentation](https://github.com/clastix/terraform-kamaji-node-pool#readme).
|
||||
|
||||
!!! tip "Production Considerations"
|
||||
The Terraform modules serve as comprehensive examples and starting points for Kamaji integration. While they include production-ready features like security groups, IAM policies, and anti-affinity rules, you should customize them to meet your specific security, compliance, and operational requirements before using them in production environments.
|
||||
|
||||
!!! note "Bootstrap Security"
|
||||
The modules automatically generate secure bootstrap tokens with limited lifetime and scope. These tokens are used only for the initial node join process and are cleaned up after successful tenant cluster formation.
|
||||
|
Before Width: | Height: | Size: 150 KiB |
|
Before Width: | Height: | Size: 207 KiB |
|
Before Width: | Height: | Size: 249 KiB |
|
Before Width: | Height: | Size: 304 KiB |
BIN
docs/content/images/kamaji-console.png
Normal file
|
After Width: | Height: | Size: 177 KiB |
@@ -1,48 +1,7 @@
|
||||
# Kamaji
|
||||
|
||||
**Kamaji** is the **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden.
|
||||
|
||||
## How it works
|
||||
Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_. Kamaji is special because the Control Plane components are running inside pods instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
|
||||
|
||||
<img src="images/architecture.png" width="600">
|
||||
|
||||
View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
|
||||
|
||||
!!! info "CNCF Compliance"
|
||||
All the Tenant Clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
|
||||
|
||||
## Getting started
|
||||
|
||||
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji.
|
||||
|
||||
|
||||
## FAQs
|
||||
Q. What does Kamaji mean?
|
||||
|
||||
A. Kamaji is named as the character _Kamajī_ (釜爺, lit. "Boiler Geezer") from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away). Kamajī is an elderly man with six, long arms who operates the boiler room of the Bathhouse. The silent professional, whom no one sees, but who gets the hot, fragrant water to all the guests, like our Kamaji provides Kubernetes as a service!
|
||||
|
||||
Q. Is Kamaji another Kubernetes distribution yet?
|
||||
|
||||
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
|
||||
|
||||
Q. How is Kamaji different from typical multi-cluster management solutions?
|
||||
|
||||
A. Most of the existing multi-cluster management solutions provision specific infrastructure for the control plane, in most cases dedicated machines. Kamaji is special because the control plane of the downstream clusters are regular pods running in the management cluster. This solution makes running control plane at scale cheaper and easier to deploy and operate.
|
||||
|
||||
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
|
||||
|
||||
A. Yes, the tenant control plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used, no forks!
|
||||
|
||||
Q. How is Kamaji different from managed Kubernetes services offered by Public Clouds?
|
||||
|
||||
A. Kamaji gives you full control over all your Kubernetes infrastructures, offering unparalleled consistency across disparate environments: cloud, data-center, and edge while simplifying and centralizing operations, maintenance, and management tasks. Unlike other Managed Kubernetes services, Kamaji allows you to connect worker nodes from any infrastructure, providing you greater freedom, flexibility, and consistency than public Managed Kubernetes services.
|
||||
|
||||
Q. How does Kamaji differ from Cluster API?
|
||||
|
||||
A. Kamaji and Cluster API complement each other. Kamaji's core idea is more efficient control plane management, while Cluster API provides a declarative approach to cluster bootstrap and lifecycle management across different environments, cloud providers, and on-premises infrastructures. Combined, you get the best of both: Kamaji simplifies Control Plane management, and Cluster API abstracts away the infrastructure. See supported [CAPI providers](guides/cluster-api.md) by Kamaji.
|
||||
|
||||
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
|
||||
|
||||
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While this is often the right choice, balancing features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can provide full cluster admin permissions to the tenant.
|
||||
|
||||
---
|
||||
template: home.html
|
||||
title: Home
|
||||
hide:
|
||||
- navigation
|
||||
- toc
|
||||
---
|
||||
|
||||
127
docs/content/llms.txt
Normal file
@@ -0,0 +1,127 @@
|
||||
# Kamaji
|
||||
|
||||
> Kamaji is the Control Plane Manager for Kubernetes, enabling multi-tenant, upstream-compliant clusters as pods in a central management cluster. Developed and maintained by Clastix, Kamaji brings operational efficiency, strong isolation, and cloud-native flexibility to Kubernetes at scale.
|
||||
|
||||
Kamaji runs Kubernetes control planes as pods in a central Management Cluster, enabling fast, scalable, and cost-effective multi-tenancy. Each Tenant Cluster is fully isolated, CNCF-compliant, and managed declaratively using Kubernetes CRDs. Kamaji integrates with Cluster API, supports GitOps workflows, and offers enterprise-grade add-ons for advanced use cases.
|
||||
Kamaji is like a fleet of Site Reliability Engineers with expertise codified into its logic, working 24/7 to keep your Control Planes up and running.
|
||||
|
||||
## Architecture
|
||||
|
||||
- **Management Cluster:** Hosts Kamaji and all Tenant Control Planes as pods, leveraging Kubernetes reliability and scalability.
|
||||
- **Tenant Clusters:** User-facing clusters, each with a dedicated control plane running in the Management Cluster. Full isolation between tenants.
|
||||
- **Tenant Worker Nodes:** Machines that join Tenant Clusters, running only tenant workloads for strong security and resource isolation.
|
||||
|
||||
## Main Features
|
||||
|
||||
- Multi-Tenancy: Deploy multiple Kubernetes control planes as pods within a single management cluster. Each control plane operates independently, ensuring complete isolation between tenants.
|
||||
- Upstream Kubernetes: Uses unmodified upstream Kubernetes components and leverages kubeadm, the default tool for cluster bootstrapping and management.
|
||||
- Infrastructure Agnostic: Connect worker nodes from any infrastructure provider. Supports bare metal, virtual machines, and cloud instances, allowing hybrid and multi-cloud deployments.
|
||||
- Resource Optimization: Control planes run as pods, sharing the management cluster's resources efficiently. Scale control planes independently based on actual usage patterns and requirements.
|
||||
- Cluster API Integration: Seamlessly integrates with Cluster API providers for automated infrastructure provisioning and lifecycle management across different environments.
|
||||
- High Availability: Supports multi-node control plane deployments with distributed etcd clusters. Includes automated failover and recovery mechanisms for production workloads.
|
||||
- Full CNCF compliance and seamless integration with Cluster API, GitOps, and IaC tools.
|
||||
|
||||
## Use Cases
|
||||
|
||||
- Private Cloud: Optimize your data center resources by running multiple Kubernetes control planes. Perfect for organizations that need complete control over their infrastructure while maintaining strict isolation between different business units.
|
||||
- Public Cloud: Build independent public cloud offerings with Kubernetes-as-a-Service capabilities. Provide the same user experience as major cloud providers while maintaining full control over the infrastructure and operational costs.
|
||||
- Bare Metal: Maximize hardware utilization by running multiple control planes on your physical infrastructure. Ideal for environments where direct hardware access, network performance, and data locality are critical.
|
||||
- Edge Computing: Run lightweight Kubernetes clusters at the edge while managing their control planes centrally. Reduce the hardware footprint at edge locations by keeping control planes in your central management cluster.
|
||||
- Platform Engineering: Build internal Kubernetes platforms with standardized cluster provisioning and management. Enable self-service capabilities while maintaining centralized control and governance over all clusters.
|
||||
- Bring Your Own Cloud: Create your own managed Kubernetes service using standard upstream components. Provide dedicated clusters to your users while maintaining operational efficiency through centralized control plane management.
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
- What does Kamaji mean? Kamaji is named after Kamajī (かまじ) from the Japanese movie Spirited Away. Kamajī is the boiler room operator who efficiently manages the bathhouse's water system—just like Kamaji manages Kubernetes clusters!
|
||||
- Is Kamaji another Kubernetes distribution? No, Kamaji is a Kubernetes Operator that provides managed Kubernetes clusters as a service, leveraging kubeadm for conformant CNCF Kubernetes clusters.
|
||||
- How is it different from typical solutions? Kamaji runs the Control Plane as regular pods in the Management Cluster, offering it as a service and making it more cost-effective and easier to operate at scale.
|
||||
- How does it compare to public cloud services? Kamaji gives you full control over your Kubernetes infrastructure, offering consistency across cloud, data center, and edge while simplifying centralized operations.
|
||||
- How does it differ from Cluster API? They complement each other: Kamaji simplifies Control Plane management, while Cluster API handles infrastructure abstraction and lifecycle management.
|
||||
- Why Kamaji when Capsule exists? While Capsule provides a single control plane with isolated namespaces, Kamaji provides dedicated control planes when tenants need full cluster admin permissions.
|
||||
- Do you provide support? Yes, Clastix offers subscription-based, enterprise-grade support plans for Kamaji. Please contact us to discuss your support needs.
|
||||
|
||||
## About Clastix
|
||||
|
||||
Clastix is a technology company specializing in cloud-native solutions and Kubernetes platforms, with a strong history of delivering advanced, production-grade systems for cloud computing builders. Clastix has collaborated with a number of CSPs worldwide, enabling them to build resilient, scalable cloud infrastructures aligned with modern digital requirements. Beyond the development of Kamaji, Clastix delivers complementary services including integration with enterprise ecosystems, strategic consulting for infrastructure transformation, and training in cloud-native and Kubernetes best practices.
|
||||
|
||||
## Releases and Versions
|
||||
|
||||
Kamaji versions are available in different types of release artifacts.
|
||||
|
||||
### Latest Releases
|
||||
|
||||
CI is responsible for building OCI and Helm Charts for every commit in the main branch (master). The latest artifacts are aimed at rapid development tests and evaluation processes.
|
||||
|
||||
### Edge Releases
|
||||
|
||||
Edge Release artifacts are published on a monthly basis as part of the open source project. Edge Releases are generally considered production ready.
|
||||
|
||||
### Stable Releases
|
||||
|
||||
Clastix Labs no longer provides release artifacts following its own semantic versioning: this choice has been made to help Clastix monetize the development and maintenance of the Kamaji project. Stable artifacts such as OCI (containers) and Helm Charts are available on a subscription basis maintained by CLASTIX.
|
||||
|
||||
## Documentation
|
||||
|
||||
### Getting Started
|
||||
|
||||
- [Getting Started](https://github.com/clastix/kamaji/blob/master/docs/content/getting-started/index.md): Step-by-step setup for different environments
|
||||
- [Getting Started on a Generic Infrastructure](https://github.com/clastix/kamaji/blob/master/docs/content/getting-started/kamaji-generic.md): The process of creating a working Kamaji setup on a generic infrastructure.
|
||||
|
||||
### Concepts
|
||||
|
||||
- [Concepts](https://github.com/clastix/kamaji/blob/master/docs/content/concepts/index.md): Core ideas and architecture
|
||||
|
||||
### Cluster API Support
|
||||
|
||||
- [Cluster API Support](https://github.com/clastix/kamaji/blob/master/docs/content/cluster-api/index.md): How Kamaji supports Cluster APIs for declarative cluster provisioning
|
||||
- [Kamaji Cluster API Provider](https://github.com/clastix/kamaji/blob/master/docs/content/cluster-api/control-plane-provider.md): Kamaji can act as a Cluster API Control Plane provider
|
||||
- [Kamaji Cluster API Class](https://github.com/clastix/kamaji/blob/master/docs/content/cluster-api/cluster-class.md): Kamaji supports ClusterClass, a simple way to create many clusters of a similar shape.
- [Kamaji Cluster Autoscaler](https://github.com/clastix/kamaji/blob/master/docs/content/cluster-api/cluster-autoscaler.md): Kamaji supports the Cluster Autoscaler through Cluster API.
- [Kamaji Cluster API Infra Providers](https://github.com/clastix/kamaji/blob/master/docs/content/cluster-api/other-providers.md): Kamaji offers seamless integration with the most popular Cluster API Infrastructure Providers.

### Guides

- [Kamaji Alternative Datastores](https://github.com/clastix/kamaji/blob/master/docs/content/guides/alternative-datastore.md): Kamaji offers the possibility of using different storage systems.
- [Kamaji Backup & Restore](https://github.com/clastix/kamaji/blob/master/docs/content/guides/backup-and-restore.md): How to back up and restore TCP resources on the Management Cluster using Velero.
- [Kamaji Certificates Lifecycle](https://github.com/clastix/kamaji/blob/master/docs/content/guides/certs-lifecycle.md): Kamaji automatically rotates cluster certificates.
- [Kamaji Datastore Migration](https://github.com/clastix/kamaji/blob/master/docs/content/guides/datastore-migration.md): Kamaji live-migrates Tenant data from one datastore to another.
- [Kamaji GitOps Approach](https://github.com/clastix/kamaji/blob/master/docs/content/guides/gitops.md): Describes a declarative way to deploy Kubernetes add-ons across multiple Tenant Clusters, the GitOps way.
- [Tenant Cluster Upgrade](https://github.com/clastix/kamaji/blob/master/docs/content/guides/upgrade.md): How to upgrade a Tenant Cluster.
- [Tenant Control Plane Monitoring](https://github.com/clastix/kamaji/blob/master/docs/content/guides/monitoring.md): How to monitor a Tenant Control Plane.
- [Terraform Support](https://github.com/clastix/kamaji/blob/master/docs/content/guides/terraform.md): How Kamaji supports Infrastructure as Code (IaC).
- [Benchmark](https://github.com/clastix/kamaji/blob/master/docs/content/reference/benchmark.md): Kamaji has been designed to operate Kubernetes Tenant Control Plane resources at large scale.
- [CNCF Conformance](https://github.com/clastix/kamaji/blob/master/docs/content/reference/conformance.md): All the "Tenant Clusters" built with Kamaji are CNCF conformant.
- [Releases and Versions](https://github.com/clastix/kamaji/blob/master/docs/content/reference/versioning.md): Kamaji versions are available in different types of release artifacts.
- [API Reference](https://github.com/clastix/kamaji/blob/master/docs/content/reference/api.md): Full API documentation for the Kamaji Custom Resources.

## GitHub

- [Readme](https://github.com/clastix/kamaji/blob/master/README.md): GitHub Readme file.
- [License](https://github.com/clastix/kamaji/blob/master/LICENSE): Apache 2.0 license.

## Support

- [Contact Clastix](https://clastix.io/contact): Commercial support and inquiries.
- [Kubernetes Slack #kamaji](https://kubernetes.slack.com/archives/C03GLTTMWNN): Community chat.

## API Reference

- [TenantControlPlane API](https://github.com/clastix/kamaji/blob/master/docs/content/reference/api.md#tenantcontrolplane): Full spec for the TenantControlPlane resource.
- [Datastore API](https://github.com/clastix/kamaji/blob/master/docs/content/reference/api.md#datastore): Full spec for the Datastore resource.

## Adopters

- [Adopters List](https://github.com/clastix/kamaji/blob/master/ADOPTERS.md): Organizations using Kamaji.

## Project Status





## User Quote

> "Kamaji works exactly as expected: it's 'simple', efficient, scalable, and I especially appreciate how Clastix has always been available for technical discussions and support throughout these two years of collaboration."
>
> — Jérémie Monsinjon, Head of Containers @ OVHCloud
@@ -1,24 +1,19 @@

# CNCF Conformance

For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation - CNCF - provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations.

The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the
[kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.

All the _“Tenant Clusters”_ built with Kamaji are CNCF conformant:

- [v1.23](https://github.com/cncf/k8s-conformance/pull/2194)
- [v1.24](https://github.com/cncf/k8s-conformance/pull/2193)
- [v1.25](https://github.com/cncf/k8s-conformance/pull/2188)
- [v1.26](https://github.com/cncf/k8s-conformance/pull/2787)
- [v1.27](https://github.com/cncf/k8s-conformance/pull/2786)
- [v1.28](https://github.com/cncf/k8s-conformance/pull/2785)
- [v1.29](https://github.com/cncf/k8s-conformance/pull/3273)
- [v1.30](https://github.com/cncf/k8s-conformance/pull/3274)
The Cloud Native Computing Foundation (_CNCF_) provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).

<p align="left" style="padding: 6px 6px">
  <img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="100" />
</p>

All the _“Tenant Clusters”_ built with Kamaji are CNCF conformant.

!!! note "Conformance Test Suite"
    The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the [kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) repository.

## Running the conformance tests

The standard tool for running CNCF conformance tests is [Sonobuoy](https://github.com/vmware-tanzu/sonobuoy). Sonobuoy is

@@ -38,7 +33,7 @@ Deploy a Sonobuoy pod to your Tenant Cluster with:

sonobuoy run --mode=certified-conformance
```

> You can run the command synchronously by adding the flag `--wait` but be aware that running the conformance tests can take an hour or more.
You can run the command synchronously by adding the flag `--wait`, but be aware that running the conformance tests can take an hour or more.
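For orientation, a typical end-to-end run might look like the sketch below. Only the `sonobuoy run` command above comes from this guide; the `status`, `retrieve`, and `results` subcommands are standard Sonobuoy CLI steps shown here as a hedged illustration of how results are usually collected.

```bash
# Run the certified-conformance suite against the Tenant Cluster selected
# by the current kubeconfig, blocking until the run finishes.
sonobuoy run --mode=certified-conformance --wait

# Or start it asynchronously and poll for progress instead.
sonobuoy status

# Once complete, download the results tarball and print a summary.
results=$(sonobuoy retrieve)
sonobuoy results "$results"
```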

View actively running pods:
@@ -1,28 +1,46 @@

# Releases and Versions

The [Clastix Labs](https://github.com/clastix) organization publishes Kamaji's versions that correspond to specific project milestones and sets of new features. These versions are available in different types of release artifacts.
The [Clastix Labs](https://github.com/clastix) organization publishes Kamaji's versions that correspond to specific project milestones and sets of new features.
These versions are available in different types of release artifacts.

## Types of release artifacts

### Latest Releases

CI is responsible for building the OCI image and Helm Chart for every commit on the main branch (`master`):
The latest artifacts are aimed at rapid development testing and evaluation.

Use of these artifacts is not recommended for production use cases, since they are not version-pinned (see the sketch after this list):

- `latest` for the OCI image (e.g. `docker.io/clastix/kamaji:latest`)
- `0.0.0+latest` for the Helm Chart managed by CLASTIX (`https://clastix.github.io/charts`)
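As a rough sketch of how these unpinned artifacts are consumed; the image repository and chart repository URL come from the list above, while the chart name `kamaji` and the `kamaji-system` namespace are assumptions for illustration only:

```bash
# Pull the floating OCI image tag.
docker pull docker.io/clastix/kamaji:latest

# Add the CLASTIX Helm repository and install the unpinned chart build.
# Chart name, release name, and namespace are assumed, not taken from this page.
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm upgrade --install kamaji clastix/kamaji \
  --namespace kamaji-system --create-namespace \
  --version "0.0.0+latest"
```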

### Edge Releases

Edge Release artifacts are published on a monthly basis as part of the open source project. Versioning follows the form `edge-{year}.{month}.{incremental}`, where the incremental part refers to the release number within that month. For example, `edge-24.7.1` is the first edge release shipped in July 2024. The full list of edge release artifacts can be found on Kamaji's GitHub [releases page](https://github.com/clastix/kamaji/releases).
Edge Release artifacts are published on a monthly basis as part of the open source project.
Versioning follows the form `edge-{year}.{month}.{incremental}`, where `incremental` refers to the release number within that month.
For example, `edge-24.7.1` is the first edge release shipped in July 2024.
The full list of edge release artifacts can be found on Kamaji's GitHub [releases page](https://github.com/clastix/kamaji/releases).

Edge Release artifacts contain the code from the main branch at the point in time when they were cut. This means they always have the latest features and fixes, and have undergone automated testing as well as maintainer code review. Edge Releases may involve partial features that are later modified or backed out. They may also involve breaking changes, although, of course, we do our best to avoid this. Edge Releases are generally considered production ready, and the project will mark specific releases as “_not recommended_” if bugs are discovered after release.
Edge Release artifacts contain the code from the main branch at the point in time when they were cut.
This means they always have the latest features and fixes, and have undergone automated testing as well as maintainer code review.
Edge Releases may involve partial features that are later modified or backed out.
They may also involve breaking changes, although, of course, we do our best to avoid this.

Edge Releases are generally considered production ready, and the project will mark specific releases as _"not recommended"_ if bugs are discovered after release.

| Kamaji      | Management Cluster | Tenant Cluster       |
|-------------|--------------------|----------------------|
| edge-24.9.2 | v1.22+             | [v1.28.0 .. v1.31.1] |
| edge-25.4.1 | v1.22+             | [v1.30.0 .. v1.33.0] |
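For anything beyond a quick test it is safer to pin an immutable edge tag from the table above rather than tracking `latest`. A minimal sketch; the optional `gh` step simply lists published releases, and only the image repository and tags shown on this page are taken from it:

```bash
# List published releases to pick an edge tag (requires the GitHub CLI).
gh release list --repo clastix/kamaji

# Pull a specific, immutable edge image instead of the floating latest tag.
docker pull docker.io/clastix/kamaji:edge-25.4.1
```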

Using Edge Release artifacts and reporting bugs helps us ensure a rapid pace of development and is a great way to help maintainers. We publish edge release guidance as part of the release notes and strive to always provide production-ready artifacts.
Using Edge Release artifacts and reporting bugs helps us ensure a rapid pace of development and is a great way to help maintainers.
We publish edge release guidance as part of the release notes and strive to always provide production-ready artifacts.

### Stable Releases

Stable Release artifacts of Kamaji follow semantic versioning, whereby changes in the major version denote large feature additions and possible breaking changes, and changes in minor versions denote safe upgrades without breaking changes. As of July 2024, the [Clastix Labs](https://github.com/clastix) organization no longer provides stable release artifacts. The latest stable release available is:
As of July 2024, [Clastix Labs](https://github.com/clastix) no longer provides release artifacts following its own semantic versioning:
this choice was made to help CLASTIX monetize the development and maintenance of the Kamaji project.

| Kamaji | Management Cluster | Tenant Cluster       |
|--------|--------------------|----------------------|
| v1.0.0 | v1.22+             | [v1.21.0 .. v1.30.2] |

Stable Release artifacts are now offered on a subscription basis by [CLASTIX](https://clastix.io), the main Kamaji project contributor. Learn more about the [available subscription plans](https://clastix.io/support/) provided by CLASTIX.
Stable artifacts, such as the OCI (container) images and Helm Charts, are available on a subscription basis maintained by [CLASTIX](https://clastix.io):
learn more about the available [Subscription Plans](https://clastix.io/support/).
@@ -1,11 +0,0 @@

# Use Cases

The Kamaji project was initially started as a solution to real and common problems, such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it opens up a much wider range of use cases.

Here are a few:

- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize cluster management, monitoring, and observability while leaving developers free to focus on applications, increasing productivity and reducing operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures, with the option of Bring Your Own Device (BYOD).
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespace-based isolation often still need access to cluster-wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** check a new Kubernetes API, an experimental flag, or a new tool without impacting production operations. Kamaji lets you simulate such changes in a safe and controlled environment.
- **Workloads Testing:** check the behaviour of your workloads on multiple Kubernetes versions with ease by deploying multiple Control Planes in a single cluster.
@@ -7,9 +7,9 @@ docs_dir: content
site_dir: site
site_author: bsctl
site_description: >-
  Kamaji deploys and operates Kubernetes Control Plane at scale with a fraction of the operational burden.
  Kamaji is the Control Plane Manager for Kubernetes

copyright: Copyright © 2020 - 2023 Clastix Labs
copyright: Copyright © 2020 - 2025 Clastix Labs

theme:
  name: material
@@ -23,28 +23,21 @@ theme:
    - content.code.copy
  include_sidebar: true
  palette:
    # Palette toggle for automatic mode
    - media: "(prefers-color-scheme)"
    - scheme: default
      primary: white
      toggle:
        icon: material/brightness-auto
        name: Switch to light mode
    # Palette toggle for light mode
    - media: "(prefers-color-scheme: light)"
      scheme: default
      primary: white
      media: "(prefers-color-scheme: light)"
      toggle:
        icon: material/lightbulb
        name: Switch to dark mode
    # Palette toggle for dark mode
    - media: "(prefers-color-scheme: dark)"
      scheme: slate
    - scheme: slate
      primary: white
      media: "(prefers-color-scheme: dark)"
      toggle:
        icon: material/lightbulb-outline
        name: Switch to system preference
        name: Switch to light mode
  favicon: images/favicon.png
  logo: images/logo.png
  custom_dir: overrides

markdown_extensions:
  - admonition
@@ -55,20 +48,38 @@ markdown_extensions:
# Generate navigation bar
nav:
  - 'Kamaji': index.md
  - 'Getting started': getting-started.md
  - 'Concepts': concepts.md
  - 'Getting started':
    - getting-started/index.md
    - getting-started/kamaji-kind.md
    - getting-started/kamaji-generic.md
    - getting-started/kamaji-aws.md
    - getting-started/kamaji-azure.md
  - 'Concepts':
    - concepts/index.md
    - concepts/tenant-control-plane.md
    - concepts/datastore.md
    - concepts/tenant-worker-nodes.md
    - concepts/konnectivity.md
  - 'Cluster API':
    - cluster-api/index.md
    - cluster-api/control-plane-provider.md
    - cluster-api/cluster-class.md
    - cluster-api/cluster-autoscaler.md
    - cluster-api/vsphere-infra-provider.md
    - cluster-api/proxmox-infra-provider.md
    - cluster-api/other-providers.md
  - 'Guides':
    - guides/index.md
    - guides/kamaji-azure-deployment.md
    - guides/alternative-datastore.md
    - guides/kamaji-gitops-flux.md
    - guides/upgrade.md
    - guides/datastore-migration.md
    - guides/backup-and-restore.md
    - guides/certs-lifecycle.md
    - guides/cluster-api.md
    - guides/datastore-migration.md
    - guides/gitops.md
    - guides/console.md
  - 'Use Cases': use-cases.md
    - guides/upgrade.md
    - guides/monitoring.md
    - guides/terraform.md
    - guides/contribute.md
  - 'Reference':
    - reference/index.md
    - reference/benchmark.md
@@ -77,7 +88,7 @@ nav:
    - reference/versioning.md
    - reference/api.md
  - 'Telemetry': telemetry.md
  - 'Enterprise Addons':
  - 'Addons':
    - enterprise-addons/index.md
    - enterprise-addons/ingress.md
  - 'Contribute': contribute.md
1
docs/overrides/assets/images/hero_logo_dark.svg
Normal file
After Width: | Height: | Size: 5.1 KiB |
1
docs/overrides/assets/images/hero_logo_light.svg
Normal file
After Width: | Height: | Size: 5.0 KiB |
0
docs/overrides/assets/javascripts/home.js
Normal file
234
docs/overrides/assets/stylesheets/home.css
Normal file
@@ -0,0 +1,234 @@
/* Root variables */
:root {
  --spacing-xs: 0.5rem;
  --spacing-sm: 0.8rem;
  --spacing-md: 1.2rem;
  --spacing-lg: 2rem;
  --border-radius: 4px;
}

/* Common section styles */
.tx-section {
  padding: var(--spacing-lg) 0;
}

.tx-section--alternate {
  background: var(--md-default-bg-color--lightest);
}

/* Grid layouts */
.tx-grid {
  display: grid;
  gap: var(--spacing-lg);
}

.tx-grid--3x2 {
  grid-template-columns: repeat(3, 1fr);
}

.tx-grid--3x1 {
  grid-template-columns: repeat(3, 1fr);
}

/* Card styles */
.tx-card {
  padding: var(--spacing-md);
  background: var(--md-default-bg-color);
  border-radius: var(--border-radius);
  box-shadow: var(--md-shadow-z1);
  transition: transform 0.125s ease, box-shadow 0.125s ease;
}

.tx-card:hover {
  transform: translateY(-2px);
  box-shadow: var(--md-shadow-z2);
}

/* FAQ styles */
.tx-faq {
  max-width: var(--md-typeset-width);
  margin: 0 auto;
  padding: 0 var(--spacing-md);
}

.tx-faq_item {
  margin-bottom: var(--spacing-lg);
}

.tx-faq_question {
  font-size: 1rem;
  margin-bottom: 1rem;
  color: var(--md-default-fg-color);
}

.tx-faq_answer {
  font-size: 0.8rem;
}

.tx-hero {
  padding: var(--spacing-lg) 0;
}

.tx-hero .md-grid {
  display: flex;
  align-items: center;
  gap: var(--spacing-lg);
}

.tx-hero_content {
  flex: 1;
  margin: 0;
}

.tx-hero_image {
  flex: 0.618;
  text-align: center;
}

.tx-hero_image img {
  max-width: 100%;
  height: auto;
}

.tx-hero_buttons {
  margin-top: var(--spacing-lg);
}

/* Dark Mode Styles */
[data-md-color-scheme="slate"] {
  /* Hero section */
  .tx-hero {
    background: var(--md-default-bg-color);
    color: var(--md-default-fg-color);
  }

  /* Primary button */
  .tx-hero .md-button.md-button--primary {
    border: none;
    color: #ffffff;
    background-color: #D81D56;
  }

  /* Secondary button */
  .tx-hero .md-button:not(.md-button--primary) {
    border: none;
    color: #24456F;
    background-color: #DBDCDB;
  }

  /* Hover for both buttons */
  .tx-hero .md-button:hover {
    color: #24456F;
    background-color: #ffffff;
  }

  /* Hero images */
  .tx-hero_image--light {
    display: block;
  }

  .tx-hero_image--dark {
    display: none;
  }
}

/* Light Mode Styles */
[data-md-color-scheme="default"] {
  /* Hero section */
  .tx-hero {
    background: var(--md-default-bg-color--dark);
    color: var(--md-primary-bg-color);
  }

  /* Primary button */
  .tx-hero .md-button.md-button--primary {
    border: none;
    color: #ffffff;
    background-color: #D81D56;
  }

  /* Secondary button */
  .tx-hero .md-button:not(.md-button--primary) {
    border: none;
    color: #24456F;
    background-color: #DBDCDB;
  }

  /* Hover for both buttons */
  .tx-hero .md-button:hover {
    background-color: #24456F;
    color: #ffffff;
  }

  /* Hero images */
  .tx-hero_image--light {
    display: none;
  }

  .tx-hero_image--dark {
    display: block;
  }
}

/* Responsive design */
@media screen and (max-width: 960px) {
  .tx-hero .md-grid {
    flex-direction: column;
    text-align: center;
  }

  .tx-hero_content {
    margin-bottom: var(--spacing-lg);
  }

  .tx-grid--3x2,
  .tx-grid--3x1 {
    grid-template-columns: repeat(2, 1fr);
  }
}

@media screen and (max-width: 600px) {
  .tx-grid--3x2,
  .tx-grid--3x1 {
    grid-template-columns: 1fr;
  }

  .tx-hero_buttons {
    display: flex;
    flex-direction: column;
    gap: var(--spacing-sm);
  }

  .tx-hero_buttons .md-button {
    width: 100%;
    margin: 0;
  }
}

/* Icon styles */
.tx-card_icon {
  margin-bottom: 1rem;
}

.tx-card_icon svg {
  width: 2rem;
  height: 2rem;
}

/* Light Mode Icon Color */
[data-md-color-scheme="default"] .tx-card_icon svg {
  fill: #24456F;
}

/* Dark Mode Icon Color */
[data-md-color-scheme="slate"] .tx-card_icon svg {
  fill: #ffffff;
}

.user-quote {
  font-size: 1.2em;
  font-style: italic;
  margin: 2em 0;
  border-left: 4px solid #D81D56 !important;
  padding-left: 1em;
}
209
docs/overrides/home.html
Normal file
@@ -0,0 +1,209 @@
{% extends "main.html" %}

{% block extrahead %}
<link rel="stylesheet" href="{{ 'assets/stylesheets/home.css' | url }}">
{% endblock %}

{% block content %}

<!-- Hero Section -->
<section class="tx-hero">
  <div class="md-grid md-typeset">
    <div class="tx-hero_content">
      <h1>The Control Plane Manager for Kubernetes</h1>
      <p>Kamaji runs the Control Plane as pods within a Management Cluster, rather than on dedicated machines. This approach simplifies operations and enables the management of multiple Kubernetes clusters with a fraction of the operational burden.</p>
      <div class="tx-hero_buttons">
        <a href="{{ 'getting-started/' | url }}" class="md-button md-button--primary">Get Started</a>
        <a href="{{ 'concepts/' | url }}" class="md-button">Concepts</a>
      </div>
    </div>
    <div class="tx-hero_image">
      <img class="tx-hero_image--light" src="assets/images/hero_logo_light.svg" alt="Kamaji Light Theme" draggable="false">
      <img class="tx-hero_image--dark" src="assets/images/hero_logo_dark.svg" alt="Kamaji Dark Theme" draggable="false">
    </div>
  </div>
</section>

<!-- User Quote Section -->
<section class="tx-section tx-section--alternate">
  <div class="md-grid md-typeset">
    <h2 class="tx-section-title">What Users Say</h2>
    <blockquote class="user-quote">
      "Kamaji works exactly as expected: it's simple, efficient, scalable, and I especially appreciate how Clastix has always been available for technical discussions and support throughout these two years of collaboration."
      <br><br>
      <span style="font-weight:bold; font-size:1em;">— Jérémie Monsinjon, Head of Containers @OVHCloud</span>
    </blockquote>
  </div>
</section>

<!-- Highlights Section -->
<section class="tx-section tx-section--alternate">
  <div class="md-grid md-typeset">
    <h2 class="tx-section-title">Highlights</h2>
    <div class="tx-grid tx-grid--3x2">
      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/layer-group.svg" %}
        </div>
        <h3>Multi-Tenancy</h3>
        <p>Deploy multiple Kubernetes control planes as pods within a single management cluster. Each control plane operates independently, ensuring complete isolation between tenants.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/cube.svg" %}
        </div>
        <h3>Upstream Kubernetes</h3>
        <p>Uses unmodified upstream Kubernetes components and leverages kubeadm, the default tool for cluster bootstrapping and management.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/server.svg" %}
        </div>
        <h3>Infrastructure Agnostic</h3>
        <p>Connect worker nodes from any infrastructure provider. Supports bare metal, virtual machines, and cloud instances, allowing hybrid and multi-cloud deployments.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/chart-line.svg" %}
        </div>
        <h3>Resource Optimization</h3>
        <p>Control planes run as pods, sharing the management cluster's resources efficiently. Scale control planes independently based on actual usage patterns and requirements.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/puzzle-piece.svg" %}
        </div>
        <h3>Cluster API Integration</h3>
        <p>Seamlessly integrates with Cluster API providers for automated infrastructure provisioning and lifecycle management across different environments.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/shield.svg" %}
        </div>
        <h3>High Availability</h3>
        <p>Support for multi-node control plane deployments with distributed etcd clusters. Includes automated failover and recovery mechanisms for production workloads.</p>
      </div>
    </div>
  </div>
</section>

<!-- Use Cases Section -->
<section class="tx-section tx-section--alternate">
  <div class="md-grid md-typeset">
    <h2 class="tx-section-title">Use Cases</h2>
    <div class="tx-grid tx-grid--3x2">
      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/building.svg" %}
        </div>
        <h3>Private Cloud</h3>
        <p>Optimize your data center resources by running multiple Kubernetes control planes. Perfect for organizations that need complete control over their infrastructure while maintaining strict isolation between different business units.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/cloud.svg" %}
        </div>
        <h3>Public Cloud</h3>
        <p>Build independent public cloud offerings with Kubernetes as a Service capabilities. Provide the same user experience of major cloud providers while maintaining full control over the infrastructure and operational costs.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/microchip.svg" %}
        </div>
        <h3>Bare Metal</h3>
        <p>Maximize hardware utilization by running multiple control planes on your physical infrastructure. Ideal for environments where direct hardware access, network performance, and data locality are critical.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/wave-square.svg" %}
        </div>
        <h3>Edge Computing</h3>
        <p>Run lightweight Kubernetes clusters at the edge while managing their control planes centrally. Reduce the hardware footprint at edge locations by keeping control planes in your central management cluster.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/gears.svg" %}
        </div>
        <h3>Platform Engineering</h3>
        <p>Build internal Kubernetes platforms with standardized cluster provisioning and management. Enable self-service capabilities while maintaining centralized control and governance over all clusters.</p>
      </div>

      <div class="tx-card">
        <div class="tx-card_icon">
          {% include ".icons/fontawesome/solid/cloud-arrow-up.svg" %}
        </div>
        <h3>BYO Cloud</h3>
        <p>Create your own managed Kubernetes service using standard upstream components. Provide dedicated clusters to your users while maintaining operational efficiency through centralized control plane management.</p>
      </div>
    </div>
  </div>
</section>

<!-- FAQ Section -->
<section class="tx-section tx-section--alternate">
  <div class="md-grid md-typeset">
    <h2 class="tx-section-title">Frequently Asked Questions</h2>
    <div class="tx-faq">
      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. What does Kamaji mean?</div>
        <div class="tx-faq_answer">
          <p>A. Kamaji is named after <em>Kamajī ( かまじ )</em> from the Japanese movie <a href="https://en.wikipedia.org/wiki/Spirited_Away">Spirited Away</a>. Kamajī is the boiler room operator who efficiently manages the bathhouse's water system - just like how our Kamaji manages Kubernetes clusters!</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. Is Kamaji another Kubernetes distribution?</div>
        <div class="tx-faq_answer">
          <p>A. No, Kamaji is a Kubernetes Operator that provides managed Kubernetes clusters as a service, leveraging kubeadm for conformant CNCF Kubernetes clusters.</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. How is it different from typical solutions?</div>
        <div class="tx-faq_answer">
          <p>A. Kamaji runs the Control Plane as regular pods in the Management Cluster, offering it as a service and making it more cost-effective and easier to operate at scale.</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. How does it compare to Public Cloud services?</div>
        <div class="tx-faq_answer">
          <p>A. Kamaji gives you full control over your Kubernetes infrastructures, offering consistency across cloud, data-center, and edge while simplifying centralized operations.</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. How does it differ from Cluster API?</div>
        <div class="tx-faq_answer">
          <p>A. They complement each other: Kamaji simplifies Control Plane management, while Cluster API handles infrastructure abstraction and lifecycle management.</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. Why Kamaji when Capsule exists?</div>
        <div class="tx-faq_answer">
          <p>A. While <a href="https://projectcapsule.dev">Capsule</a> provides a single control plane with isolated namespaces, Kamaji provides dedicated control planes when tenants need full cluster admin permissions.</p>
        </div>
      </div>

      <div class="tx-faq_item">
        <div class="tx-faq_question">Q. Do you provide support?</div>
        <div class="tx-faq_answer">
          <p>A. Yes, <a href="https://clastix.io">Clastix</a> offers subscription-based, enterprise-grade support plans for Kamaji. Please contact us to discuss your support needs.</p>
        </div>
      </div>
    </div>
  </div>
</section>

{% endblock %}
@@ -1,2 +1,2 @@
mkdocs>=1.3.0
mkdocs-material>=8.2.8
mkdocs
mkdocs-material
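With the version pins dropped, a local documentation build now resolves whatever `mkdocs` and `mkdocs-material` versions pip offers. A hedged sketch of such a build; the working directory and the requirements file path are assumptions, since the file's location is not shown in this hunk:

```bash
# Create an isolated environment and install the (now unpinned) doc tooling.
python3 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt

# Serve the site locally with live reload; mkdocs.yml selects the material
# theme and the overrides/ custom_dir shown elsewhere in this diff.
mkdocs serve
```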
10
docs/templates/reference-cr.tmpl
vendored
@@ -1,5 +1,8 @@
# API Reference

This section contains the Kamaji Custom Resource Definitions,
as well as the Cluster API Control Plane provider ones.

Packages:
{{range .Groups}}
- [{{.Group}}/{{.Version}}](#{{ anchorize (printf "%s/%s" .Group .Version) }})
@@ -8,7 +11,7 @@ Packages:
{{- range .Groups }}
{{- $group := . }}

# {{.Group}}/{{.Version}}
## {{.Group}}/{{.Version}}

Resource Types:
{{range .Kinds}}
@@ -17,15 +20,14 @@ Resource Types:

{{range .Kinds}}
{{$kind := .}}
## {{.Name}}
### {{.Name}}

{{range .Types}}

{{if not .IsTopLevel}}
### {{.Name}}
<span id="{{ anchorize .Name }}">`{{.Name}}`</span>
{{end}}

{{.Description}}

<table>
@@ -65,6 +65,7 @@ var _ = Describe("Deploy a TenantControlPlane with resource with custom service
	// Delete the service account and TenantControlPlane resources after test is finished
	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
		Expect(k8sClient.Delete(context.Background(), sa)).NotTo(HaveOccurred())
	})
	// Check if TenantControlPlane resource has been created and if its pods have the right service account
	It("Should be Ready and have correct sa", func() {

@@ -22,18 +22,18 @@ import (
	"github.com/clastix/kamaji/internal/utilities"
)

var _ = Describe("When migrating a Tenant Control Plane to another datastore", func() {
func featureTestMigration(driver string) {
	var tcp *kamajiv1alpha1.TenantControlPlane
	// Create a TenantControlPlane resource into the cluster
	JustBeforeEach(func() {
		// Fill TenantControlPlane object
		tcp = &kamajiv1alpha1.TenantControlPlane{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("migrating-%s-etcd", rand.String(5)),
				Name:      fmt.Sprintf("migrating-%s-%s", rand.String(5), driver),
				Namespace: "default",
			},
			Spec: kamajiv1alpha1.TenantControlPlaneSpec{
				DataStore: "etcd-bronze",
				DataStore: fmt.Sprintf("%s-bronze", driver),
				ControlPlane: kamajiv1alpha1.ControlPlane{
					Deployment: kamajiv1alpha1.DeploymentSpec{
						Replicas: pointer.To(int32(1)),
@@ -91,7 +91,7 @@ var _ = Describe("When migrating a Tenant Control Plane to another datastore", f
			return err
		}

		tcp.Spec.DataStore = "etcd-silver"
		tcp.Spec.DataStore = fmt.Sprintf("%s-silver", driver)

		return k8sClient.Update(context.Background(), tcp)
	}, time.Minute, time.Second).ShouldNot(HaveOccurred())
@@ -114,11 +114,28 @@
		}

		return tcp.Status.Storage.DataStoreName
	}, time.Minute, time.Second).Should(BeEquivalentTo("etcd-silver"))
	}, time.Minute, time.Second).Should(BeEquivalentTo(fmt.Sprintf("%s-silver", driver)))

	By("checking the presence of the previous Namespace")
	Eventually(func() error {
		return tcpClient.Get(context.Background(), types.NamespacedName{Name: ns.GetName()}, &corev1.Namespace{})
	}).ShouldNot(HaveOccurred())
	// The Freeze ValidatingWebhookConfiguration should have been removed successfully:
	// we're checking write operations are allowed.
	By("checking the changes are newly allowed")
	Eventually(func() error {
		var writeNamespace corev1.Namespace
		writeNamespace.Name = fmt.Sprintf("write-%s-%s", rand.String(5), driver)

		return tcpClient.Create(context.Background(), &writeNamespace)
	}).ShouldNot(HaveOccurred())
	})
}

var _ = Describe("When migrating a Tenant Control Plane to another datastore (etcd)", func() {
	featureTestMigration("etcd")
})

var _ = Describe("When migrating a Tenant Control Plane to another datastore (postgresql)", func() {
	featureTestMigration("postgresql")
})
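The test drives a live migration simply by updating `spec.DataStore` on the TenantControlPlane; outside the suite the same change can be made imperatively. A hedged sketch follows: the resource name, the JSON field casing `dataStore` / `status.storage.dataStoreName`, and the object name are inferred from the Go types above and are assumptions, not taken verbatim from this diff.

```bash
# Point an existing TenantControlPlane at a different DataStore; Kamaji
# performs the migration and temporarily freezes writes while it runs
# (see the Freeze ValidatingWebhookConfiguration check in the test above).
# The object name below is a placeholder mirroring the generated test names.
kubectl patch tenantcontrolplane migrating-abcde-etcd \
  --namespace default \
  --type merge \
  --patch '{"spec":{"dataStore":"etcd-silver"}}'

# Watch the status until it reports the new datastore (field path assumed).
kubectl get tenantcontrolplane migrating-abcde-etcd -n default \
  -o jsonpath='{.status.storage.dataStoreName}'
```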
68
e2e/tcp_scale_test.go
Normal file
@@ -0,0 +1,68 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pointer "k8s.io/utils/ptr"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

var _ = Describe("Scale a TenantControlPlane resource", func() {
	// Fill TenantControlPlane object
	tcp := &kamajiv1alpha1.TenantControlPlane{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tcp-clusterip-scale",
			Namespace: "default",
		},
		Spec: kamajiv1alpha1.TenantControlPlaneSpec{
			ControlPlane: kamajiv1alpha1.ControlPlane{
				Deployment: kamajiv1alpha1.DeploymentSpec{
					Replicas: pointer.To(int32(1)),
				},
				Service: kamajiv1alpha1.ServiceSpec{
					ServiceType: "ClusterIP",
				},
			},
			NetworkProfile: kamajiv1alpha1.NetworkProfileSpec{
				Address: "172.18.0.2",
			},
			Kubernetes: kamajiv1alpha1.KubernetesSpec{
				Version: "v1.23.6",
				Kubelet: kamajiv1alpha1.KubeletSpec{
					CGroupFS: "cgroupfs",
				},
				AdmissionControllers: kamajiv1alpha1.AdmissionControllers{
					"LimitRanger",
					"ResourceQuota",
				},
			},
			Addons: kamajiv1alpha1.AddonsSpec{},
		},
	}

	// Create a TenantControlPlane resource into the cluster
	JustBeforeEach(func() {
		Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
		StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
	})

	// Delete the TenantControlPlane resource after test is finished
	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
	})

	// Scale TenantControlPlane resource and check the status
	It("Should scale correctly", func() {
		ScaleTenantControlPlane(tcp, 0)
		StatusMustEqualTo(tcp, kamajiv1alpha1.VersionSleeping)
		ScaleTenantControlPlane(tcp, 1)
		StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
	})
})
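`ScaleTenantControlPlane` and `StatusMustEqualTo` are suite helpers not shown in this diff; conceptually the test just toggles the control-plane replicas and waits for the Ready or Sleeping version status. A hedged sketch of doing the same by hand against a running cluster; the JSON field casing in the patch is inferred from the Go types above and is an assumption:

```bash
# Scale the tenant control plane down to zero pods; Kamaji should report it
# as sleeping (the VersionSleeping status asserted by the test above).
kubectl patch tenantcontrolplane tcp-clusterip-scale -n default \
  --type merge \
  --patch '{"spec":{"controlPlane":{"deployment":{"replicas":0}}}}'

# Scale it back up; the status should return to Ready.
kubectl patch tenantcontrolplane tcp-clusterip-scale -n default \
  --type merge \
  --patch '{"spec":{"controlPlane":{"deployment":{"replicas":1}}}}'

# Inspect the resource to confirm the reported status.
kubectl get tenantcontrolplane tcp-clusterip-scale -n default
```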