Compare commits

59 commits

| Author | SHA1 | Date |
|---|---|---|
| | 1ccc1d1b1e | |
| | 1d96710890 | |
| | edceda3302 | |
| | 755cc5bacd | |
| | e0c86d685c | |
| | ddb700f4f0 | |
| | 4bdddfc695 | |
| | 8b999f1323 | |
| | 2571086ff3 | |
| | cd9d92296b | |
| | f24ff618a9 | |
| | 4bf39149ec | |
| | 045c5bbd7c | |
| | 6eb3171817 | |
| | 289bad540c | |
| | ac06447706 | |
| | 95de31d697 | |
| | 0037b4941c | |
| | c251f57f06 | |
| | 129cb0e6fe | |
| | 73e0618ad3 | |
| | 6c2634b5e9 | |
| | dac670113f | |
| | c8039cdf5c | |
| | a7cfc9a898 | |
| | 0f1a4f28de | |
| | 40f57466e2 | |
| | feed6634a5 | |
| | c85e686283 | |
| | 05ffd6cf75 | |
| | e16855a1b4 | |
| | d21eb135fd | |
| | c5e12cc401 | |
| | dc97d69d0c | |
| | bac5d56076 | |
| | 973392bd85 | |
| | 30b36ba7f4 | |
| | 0db27a7335 | |
| | facf23a055 | |
| | 6674373037 | |
| | 72712693a2 | |
| | 33709005b1 | |
| | 6ce83c551e | |
| | 2b638fe09d | |
| | 58a5cac9e8 | |
| | e9d2af931a | |
| | a996803db5 | |
| | e34fc1851f | |
| | 740fe9c938 | |
| | 65854721de | |
| | adde828e03 | |
| | ffc2c7c967 | |
| | 0f195286a7 | |
| | f768f93fe9 | |
| | 05cbff1fd8 | |
| | 7e94ecdbab | |
| | 648da19687 | |
| | 6c4b339c4b | |
| | eee62032de | |
.github/workflows/ci.yaml (6 changed lines)

@@ -14,12 +14,12 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.19'
+          go-version: '1.21'
           check-latest: true
       - name: Run golangci-lint
         uses: golangci/golangci-lint-action@v3.2.0
         with:
-          version: v1.49.0
+          version: v1.54.2
           only-new-issues: false
           args: --timeout 5m --config .golangci.yml
   diff:
@@ -31,7 +31,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.19'
+          go-version: '1.21'
           check-latest: true
       - run: make yaml-installation-file
       - name: Checking if YAML installer file is not aligned
.github/workflows/docker-ci.yml (2 changed lines)

@@ -19,7 +19,7 @@ jobs:
         id: build-args
         run: |
           # Declare vars for internal use
-          VERSION=$(git describe --abbrev=0 --tags)
+          VERSION=$(make get_version)
           GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
           GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
           GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
.github/workflows/e2e.yaml (4 changed lines)

@@ -13,6 +13,7 @@ on:
       - 'main.go'
       - 'Makefile'
       - 'internal/**'
+      - 'cmd/**'
   pull_request:
     branches: [ "*" ]
     paths:
@@ -25,6 +26,7 @@ on:
       - 'main.go'
       - 'Makefile'
       - 'internal/**'
+      - 'cmd/**'

 jobs:
   kind:
@@ -36,7 +38,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.19'
+          go-version: '1.21'
           check-latest: true
       - run: |
           sudo apt-get update
.gitignore (3 changed lines)

@@ -24,6 +24,9 @@ bin
 *~
 .vscode

 # Tilt files.
 .tiltbuild

+**/*.kubeconfig
+**/*.crt
+**/*.key
@@ -11,6 +11,7 @@ linters-settings:

 linters:
   disable:
+    - depguard
     - wrapcheck
     - gomnd
     - scopelint
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.19 as builder
+FROM golang:1.21 as builder

 WORKDIR /workspace
 # Copy the Go Modules manifests
Makefile (10 changed lines)

@@ -3,7 +3,7 @@
 # To re-generate a bundle for another specific version without changing the standard setup, you can:
 # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
 # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
-VERSION ?= 0.3.2
+VERSION ?= 0.4.0

 # CHANNELS define the bundle channels used in the bundle.
 # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -89,7 +89,7 @@ controller-gen: ## Download controller-gen locally if necessary.

 GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
 golangci-lint: ## Download golangci-lint locally if necessary.
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2)

 KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
@@ -154,6 +154,10 @@ GIT_MODIFIED ?= $$(echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
 GIT_REPO ?= $$(git config --get remote.origin.url)
 BUILD_DATE ?= $$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)

+.PHONY: get_version
+get_version:
+	@echo -n v$(VERSION)
+
 build: generate fmt vet ## Build manager binary.
 	go build -o bin/manager main.go

@@ -165,7 +169,7 @@ docker-build: ## Build docker image with the manager.
 		--build-arg GIT_TAG_COMMIT=$(GIT_TAG_COMMIT) \
 		--build-arg GIT_MODIFIED=$(GIT_MODIFIED) \
 		--build-arg GIT_REPO=$(GIT_REPO) \
-		--build-arg GIT_LAST_TAG=$(VERSION) \
+		--build-arg GIT_LAST_TAG=v$(VERSION) \
 		--build-arg BUILD_DATE=$(BUILD_DATE)

 docker-push: ## Push docker image with the manager.
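The new `get_version` target is what the Docker CI change above consumes: the image version now comes from the Makefile's `VERSION` variable instead of the latest git tag. A minimal local sketch of how the two pieces fit together; the echoed message is illustrative and not part of the workflow:

```sh
# Mirrors what .github/workflows/docker-ci.yml now does when computing build args.
VERSION=$(make get_version)                   # prints "v0.4.0" with no trailing newline
GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
echo "building image tag ${VERSION} from commit ${GIT_HEAD_COMMIT}"
```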
README.md (39 changed lines)

@@ -12,18 +12,18 @@
 
 

-**Kamaji** deploys and operates **Kubernetes Control Plane** at scale with a fraction of the operational burden. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
+**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden. Kamaji is special because the Control Plane components are running inside pods instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.

 <img src="docs/content/images/architecture.png" width="600">

-## Features
+## Main Features

 - **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
-- **Multi-cluster Management:** centrally manage multiple clusters from a single admin cluster. Happy SREs.
-- **Cheaper Control Planes:** place multiple control planes on a single node, instead of having three nodes for a single control plane.
-- **Stronger Multi-Tenancy:** leave users to access the control plane with admin permissions while keeping them isolated at the infrastructure level.
-- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
-- **Full APIs compliant:** all clusters are CNCF compliant built with upstream Kubernetes binaries
+- **Multi-cluster Management:** centrally manage multiple Kubernetes clusters from a single Management Cluster.
+- **High-density Control Plane:** place multiple control planes on the same infrastructure, instead of having dedicated machines for each control plane.
+- **Strong Multi-tenancy:** leave users to access the control plane with admin permissions while keeping them isolated at the infrastructure level.
+- **Kubernetes Inception:** use Kubernetes to manage Kubernetes with automation, high-availability, fault tolerance, and autoscaling out of the box.
+- **Bring Your Own Device:** keep the control plane isolated from data plane. Worker nodes can join and run consistently everywhere: cloud, edge, and data-center.
+- **Full CNCF compliant:** all clusters are built with upstream Kubernetes binaries, resulting in full CNCF compliant Kubernetes clusters.

 ## Roadmap
@@ -37,7 +37,7 @@
 - [ ] Autoscaling of Tenant Control Plane
 - [x] Provisioning through Cluster APIs
 - [ ] Terraform provider
-- [ ] Custom Prometheus metrics for monitoring and alerting
+- [ ] Custom Prometheus metrics


 ## Documentation
@@ -45,24 +45,3 @@ Please, check the project's [documentation](https://kamaji.clastix.io/) for gett

 ## Contributions
 Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
-
-## FAQs
-Q. What does Kamaji mean?
-
-A. Kamaji is named as the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
-
-Q. Is Kamaji another Kubernetes distribution?
-
-A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
-
-Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
-
-A. Yes, the tenant control plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
-
-Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
-
-A. A multi-tenancy solution, like Capsule shares the Kubernetes control plane among all tenants keeping tenant namespaces isolated by policies. While the solution is the right choice by balancing between features and ease of usage, there are cases where a tenant user requires access to the control plane, for example, when a tenant requires to manage CRDs on his own. With Kamaji, you can provide cluster admin permissions to the tenant.
-
-Q. Well you convinced me, how to get a try?
-
-A. It is possible to get started with Kamaji on a laptop with [KinD](getting-started.md) installed.

@@ -108,7 +108,7 @@ type DeploymentSpec struct {
 	// +kubebuilder:default={registry:"registry.k8s.io",apiServerImage:"kube-apiserver",controllerManagerImage:"kube-controller-manager",schedulerImage:"kube-scheduler"}
 	RegistrySettings RegistrySettings `json:"registrySettings,omitempty"`
 	// +kubebuilder:default=2
-	Replicas int32 `json:"replicas,omitempty"`
+	Replicas *int32 `json:"replicas,omitempty"`
 	// NodeSelector is a selector which must be true for the pod to fit on a node.
 	// Selector which must match a node's labels for the pod to be scheduled on that node.
 	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
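Switching `Replicas` from `int32` to `*int32` lets the API distinguish "field not set" (a nil pointer, so the `+kubebuilder:default=2` marker applies) from an explicit `replicas: 0`. A minimal Go sketch of that difference, not taken from the Kamaji codebase:

```go
package main

import "fmt"

// deploymentReplicas mimics how a controller could resolve the replica count:
// a nil pointer means "use the default", while an explicit 0 is preserved.
func deploymentReplicas(replicas *int32) int32 {
	if replicas == nil {
		return 2 // matches the +kubebuilder:default=2 marker
	}
	return *replicas
}

func main() {
	zero := int32(0)
	fmt.Println(deploymentReplicas(nil))   // 2 (unset, falls back to the default)
	fmt.Println(deploymentReplicas(&zero)) // 0 (explicitly scaled down)
}
```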
assets/logo.svg (new file, SVG image, 5.1 KiB)
@@ -1,6 +1,6 @@
 apiVersion: v2
-appVersion: v0.3.2
-description: Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster” to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
+appVersion: v0.4.0
+description: Kamaji is a Kubernetes Control Plane Manager.
 home: https://github.com/clastix/kamaji
 icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
 kubeVersion: ">=1.21.0-0"
@@ -15,8 +15,8 @@ name: kamaji
 sources:
   - https://github.com/clastix/kamaji
 type: application
-version: 0.12.3
+version: 0.14.0
 annotations:
   catalog.cattle.io/certified: partner
   catalog.cattle.io/release-name: kamaji
-  catalog.cattle.io/display-name: Kamaji
+  catalog.cattle.io/display-name: Kamaji
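With the chart bumped to 0.14.0 and its appVersion to v0.4.0, picking up the new release would look roughly like the following; the repository name and URL are assumptions, not taken from this diff:

```sh
# Assumed Helm repository for the Kamaji chart; adjust to your own chart source if different.
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm upgrade --install kamaji clastix/kamaji --namespace kamaji-system --version 0.14.0
```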
@@ -1,8 +1,8 @@
 # kamaji

- ![Version: 0.12.3](https://img.shields.io/badge/Version-0.12.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.2](https://img.shields.io/badge/AppVersion-v0.3.2-informational?style=flat-square)
+ ![Version: 0.14.0](https://img.shields.io/badge/Version-0.14.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.4.0](https://img.shields.io/badge/AppVersion-v0.4.0-informational?style=flat-square)

-Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster” to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
+Kamaji is a Kubernetes Control Plane Manager.

 ## Maintainers
@@ -73,8 +73,9 @@ Here the values you can override:
 | datastore.basicAuth.usernameSecret.name | string | `nil` | The name of the Secret containing the username used to connect to the relational database. |
 | datastore.basicAuth.usernameSecret.namespace | string | `nil` | The namespace of the Secret containing the username used to connect to the relational database. |
 | datastore.driver | string | `"etcd"` | (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd). |
+| datastore.enabled | bool | `true` | (bool) Enable the Kamaji Datastore creation (default=true) |
 | datastore.endpoints | list | `[]` | (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically. |
-| datastore.nameOverride | string | `nil` | The Datastore name override, if empty defaults to `default` |
+| datastore.nameOverride | string | `nil` | The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to. |
 | datastore.tlsConfig.certificateAuthority.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. |
 | datastore.tlsConfig.certificateAuthority.certificate.name | string | `nil` | Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
 | datastore.tlsConfig.certificateAuthority.certificate.namespace | string | `nil` | Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
@@ -100,10 +101,11 @@ Here the values you can override:
 | etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
 | etcd.persistence.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
 | etcd.persistence.size | string | `"10Gi"` | |
-| etcd.persistence.storageClass | string | `""` | |
+| etcd.persistence.storageClassName | string | `""` | |
 | etcd.port | int | `2379` | The client request port. |
 | etcd.serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
 | etcd.serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
+| etcd.tolerations | list | `[]` | (array) Kubernetes affinity rules to apply to Kamaji etcd pods |
 | extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |
 | fullnameOverride | string | `""` | |
 | healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
@@ -2,7 +2,11 @@
 Create a default fully qualified datastore name.
 */}}
 {{- define "datastore.fullname" -}}
+{{- if .Values.datastore.enabled }}
 {{- default "default" .Values.datastore.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- required "A valid .Values.datastore.nameOverride required!" .Values.datastore.nameOverride }}
+{{- end }}
 {{- end }}

 {{/*
@@ -1,3 +1,4 @@
+{{- if .Values.datastore.enabled}}
 apiVersion: kamaji.clastix.io/v1alpha1
 kind: DataStore
 metadata:
@@ -24,3 +25,4 @@ spec:
       {{- include "datastore.certificateAuthority" . | indent 6 }}
     clientCertificate:
       {{- include "datastore.clientCertificate" . | indent 6 }}
+{{- end}}
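Together, the `datastore.fullname` helper and the new `datastore.enabled` guard change how the chart resolves the DataStore: with `enabled: true` it renders a DataStore (named `default` unless overridden), while with `enabled: false` it only references an existing one and `nameOverride` becomes mandatory. A values sketch of the second case; the DataStore name is illustrative, not taken from the chart:

```yaml
# values.yaml fragment: point Kamaji at a pre-existing DataStore instead of creating one.
datastore:
  enabled: false           # do not render the DataStore manifest
  nameOverride: my-etcd    # required when enabled=false; templating fails without it
  driver: etcd
```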
@@ -30,11 +30,15 @@ spec:
             - bash
             - -c
             - |-
-              etcdctl member list -w table &&
-              etcdctl user add --no-password=true root &&
-              etcdctl role add root &&
-              etcdctl user grant-role root root &&
-              etcdctl auth enable
+              etcdctl member list -w table
+              if etcdctl user get root &>/dev/null; then
+                echo "User already exists, nothing to do"
+              else
+                etcdctl user add --no-password=true root &&
+                etcdctl role add root &&
+                etcdctl user grant-role root root &&
+                etcdctl auth enable
+              fi
           env:
             - name: ETCDCTL_ENDPOINTS
               value: https://etcd-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:2379
@@ -37,13 +37,21 @@ spec:
       containers:
         - name: kubectl
           image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
-          command:
-            - sh
-            - -c
-            - |-
-              kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
-              kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
-              kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              if kubectl get secret {{ include "etcd.caSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
+                echo "Secret {{ include "etcd.caSecretName" . }} already exists"
+              else
+                echo "Creating secret {{ include "etcd.caSecretName" . }}"
+                kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem
+              fi
+              if kubectl get secret {{ include "etcd.clientSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
+                echo "Secret {{ include "etcd.clientSecretName" . }} already exists"
+              else
+                echo "Creating secret {{ include "etcd.clientSecretName" . }}"
+                kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
+              fi
           volumeMounts:
             - mountPath: /certs
              name: certs
@@ -15,6 +15,7 @@ rules:
     resources:
       - secrets
     verbs:
+      - get
       - delete
     resourceNames:
       - {{ include "etcd.caSecretName" . }}
@@ -22,6 +22,10 @@ spec:
         - name: certs
           secret:
             secretName: {{ include "etcd.caSecretName" . }}
+      {{- with .Values.etcd.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       containers:
         - name: etcd
           image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag | default "v3.5.4" }}
@@ -54,12 +54,15 @@ etcd:
     name: ""
   persistence:
     size: 10Gi
-    storageClass: ""
+    storageClassName: ""
     accessModes:
       - ReadWriteOnce
     # -- The custom annotations to add to the PVC
     customAnnotations: {}
     # volumeType: local

+  # -- (array) Kubernetes affinity rules to apply to Kamaji etcd pods
+  tolerations: []
+
   overrides:
     caSecret:
@@ -157,7 +160,7 @@ loggingDevel:
   enable: false

 datastore:
-  # -- (string) The Datastore name override, if empty defaults to `default`
+  # -- (bool) Enable the Kamaji Datastore creation (default=true)
+  enabled: true
+  # -- (string) The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to.
   nameOverride:
   # -- (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd).
   driver: etcd
@@ -14,10 +14,14 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+	ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"

 	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
 	cmdutils "github.com/clastix/kamaji/cmd/utils"
@@ -31,6 +35,7 @@ import (
 	"github.com/clastix/kamaji/internal/webhook/routes"
 )

+//nolint:maintidx
 func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 	// CLI flags
 	var (
@@ -40,6 +45,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 		tmpDirectory               string
 		kineImage                  string
 		controllerReconcileTimeout time.Duration
+		cacheResyncPeriod          time.Duration
 		datastore                  string
 		managerNamespace           string
 		managerServiceAccountName  string
@@ -91,13 +97,22 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 		setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))

 		mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-			Scheme:                  scheme,
-			MetricsBindAddress:      metricsBindAddress,
-			Port:                    9443,
+			Scheme: scheme,
+			Metrics: metricsserver.Options{
+				BindAddress: metricsBindAddress,
+			},
+			WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
+				Port: 9443,
+			}),
 			HealthProbeBindAddress:  healthProbeBindAddress,
 			LeaderElection:          leaderElect,
 			LeaderElectionNamespace: managerNamespace,
 			LeaderElectionID:        "799b98bc.clastix.io",
+			NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
+				opts.SyncPeriod = &cacheResyncPeriod
+
+				return cache.New(config, opts)
+			},
 		})
 		if err != nil {
 			setupLog.Error(err, "unable to start manager")
@@ -105,9 +120,9 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 			return err
 		}

-		tcpChannel := make(controllers.TenantControlPlaneChannel)
+		tcpChannel, certChannel := make(controllers.TenantControlPlaneChannel), make(controllers.CertificateChannel)

-		if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
+		if err = (&controllers.DataStore{Client: mgr.GetClient(), TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "DataStore")

 			return err
@@ -122,6 +137,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 				KineContainerImage: kineImage,
 				TmpBaseDirectory:   tmpDirectory,
 			},
+			CertificateChan:      certChannel,
 			TriggerChan:          tcpChannel,
 			KamajiNamespace:      managerNamespace,
 			KamajiServiceAccount: managerServiceAccountName,
@@ -136,6 +152,12 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 			return err
 		}

+		if err = (&controllers.CertificateLifecycle{Channel: certChannel}).SetupWithManager(mgr); err != nil {
+			setupLog.Error(err, "unable to create controller", "controller", "CertificateLifecycle")
+
+			return err
+		}
+
 		if err = (&kamajiv1alpha1.DatastoreUsedSecret{}).SetupWithManager(ctx, mgr); err != nil {
 			setupLog.Error(err, "unable to create indexer", "indexer", "DatastoreUsedSecret")

@@ -232,13 +254,14 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 	cmd.Flags().StringVar(&tmpDirectory, "tmp-directory", "/tmp/kamaji", "Directory which will be used to work with temporary files.")
 	cmd.Flags().StringVar(&kineImage, "kine-image", "rancher/kine:v0.9.2-amd64", "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies).")
 	cmd.Flags().StringVar(&datastore, "datastore", "etcd", "The default DataStore that should be used by Kamaji to setup the required storage.")
-	cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:v%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
+	cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
 	cmd.Flags().IntVar(&maxConcurrentReconciles, "max-concurrent-tcp-reconciles", 1, "Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption)")
 	cmd.Flags().StringVar(&managerNamespace, "pod-namespace", os.Getenv("POD_NAMESPACE"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
 	cmd.Flags().StringVar(&managerServiceName, "webhook-service-name", "kamaji-webhook-service", "The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs.")
 	cmd.Flags().StringVar(&managerServiceAccountName, "serviceaccount-name", os.Getenv("SERVICE_ACCOUNT"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
 	cmd.Flags().StringVar(&webhookCAPath, "webhook-ca-path", "/tmp/k8s-webhook-server/serving-certs/ca.crt", "Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs.")
 	cmd.Flags().DurationVar(&controllerReconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "The reconciliation request timeout before the controller withdraw the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.")
+	cmd.Flags().DurationVar(&cacheResyncPeriod, "cache-resync-period", 10*time.Hour, "The controller-runtime.Manager cache resync period.")

 	cobra.OnInitialize(func() {
 		viper.AutomaticEnv()
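The manager construction in the hunk above follows the options layout used by newer controller-runtime releases, where the metrics endpoint and the webhook server are configured through dedicated option structs instead of the old `MetricsBindAddress` and `Port` fields. A standalone sketch of that pattern, assuming a recent controller-runtime module and illustrative bind addresses (this is not the exact Kamaji wiring):

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	scheme := runtime.NewScheme()

	// Metrics and webhook serving are expressed as option structs in recent controller-runtime.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:  scheme,
		Metrics: metricsserver.Options{BindAddress: ":8080"},
		WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
			Port: 9443,
		}),
		HealthProbeBindAddress: ":8081",
	})
	if err != nil {
		panic(err)
	}
	_ = mgr // in real code, start it with mgr.Start(ctrl.SetupSignalHandler())
}
```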
@@ -4,9 +4,6 @@
 package cmd

 import (
-	"math/rand"
-	"time"
-
 	"github.com/spf13/cobra"
 	_ "go.uber.org/automaxprocs" // Automatically set `GOMAXPROCS` to match Linux container CPU quota.
 	"k8s.io/apimachinery/pkg/runtime"
@@ -22,9 +19,6 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
 		Use:   "kamaji",
 		Short: "Build and operate Kubernetes at scale with a fraction of operational burden.",
 		PersistentPreRun: func(cmd *cobra.Command, args []string) {
-			// Seed is required to ensure non reproducibility for the certificates generate by Kamaji.
-			rand.Seed(time.Now().UnixNano())
-
 			utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 			utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
 			utilruntime.Must(appsv1.RegisterDefaults(scheme))
@@ -9,8 +9,8 @@ namespace: kamaji-system
 namePrefix: kamaji-

 # Labels to add to all resources and selectors.
-#commonLabels:
-#  someName: someValue
+commonLabels:
+  cluster.x-k8s.io/provider: "kamaji-core"

 bases:
   - ../crd
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Namespace
 metadata:
   labels:
+    cluster.x-k8s.io/provider: kamaji-core
     control-plane: controller-manager
   name: kamaji-system
 ---
@@ -11,6 +12,8 @@ metadata:
   annotations:
     cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
     controller-gen.kubebuilder.io/version: v0.11.4
+  labels:
+    cluster.x-k8s.io/provider: kamaji-core
   name: datastores.kamaji.clastix.io
 spec:
   group: kamaji.clastix.io
@@ -250,6 +253,8 @@ metadata:
   annotations:
     cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
     controller-gen.kubebuilder.io/version: v0.11.4
+  labels:
+    cluster.x-k8s.io/provider: kamaji-core
   name: tenantcontrolplanes.kamaji.clastix.io
 spec:
   conversion:
@@ -371,7 +376,7 @@ spec:
 description: Resources define the amount of CPU and memory to allocate to the Konnectivity server.
 properties:
   claims:
-    description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
+    description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
     items:
       description: ResourceClaim references one entry in PodSpec.ResourceClaims.
       properties:
@@ -401,7 +406,7 @@ spec:
       - type: string
     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
     x-kubernetes-int-or-string: true
-  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   type: object
 type: object
 version:
@@ -593,7 +598,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -618,6 +623,16 @@
 required:
   - port
 type: object
+sleep:
+  description: Sleep represents the duration that the container should sleep before being terminated.
+  properties:
+    seconds:
+      description: Seconds is the number of seconds to sleep.
+      format: int64
+      type: integer
+  required:
+    - seconds
+  type: object
 tcpSocket:
   description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
   properties:
@@ -658,7 +673,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -683,6 +698,16 @@
 required:
   - port
 type: object
+sleep:
+  description: Sleep represents the duration that the container should sleep before being terminated.
+  properties:
+    seconds:
+      description: Seconds is the number of seconds to sleep.
+      format: int64
+      type: integer
+  required:
+    - seconds
+  type: object
 tcpSocket:
   description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
   properties:
@@ -717,7 +742,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -741,7 +766,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -853,7 +878,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -877,7 +902,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -938,11 +963,28 @@
     format: int32
     type: integer
   type: object
+resizePolicy:
+  description: Resources resize policy for the container.
+  items:
+    description: ContainerResizePolicy represents resource resize policy for the container.
+    properties:
+      resourceName:
+        description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.'
+        type: string
+      restartPolicy:
+        description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.
+        type: string
+    required:
+      - resourceName
+      - restartPolicy
+    type: object
+  type: array
+  x-kubernetes-list-type: atomic
 resources:
   description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   properties:
     claims:
-      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
+      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
      items:
         description: ResourceClaim references one entry in PodSpec.ResourceClaims.
         properties:
@@ -972,9 +1014,12 @@
       - type: string
     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
     x-kubernetes-int-or-string: true
-  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   type: object
 type: object
+restartPolicy:
+  description: 'RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.'
+  type: string
 securityContext:
   description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
   properties:
@@ -1037,7 +1082,7 @@
 description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.
 properties:
   localhostProfile:
-    description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".
+    description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.
     type: string
   type:
     description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."
@@ -1055,7 +1100,7 @@
 description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
 type: string
 hostProcess:
-  description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
+  description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
   type: boolean
 runAsUserName:
   description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
@@ -1079,7 +1124,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -1103,7 +1148,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -1394,7 +1439,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -1419,6 +1464,16 @@
 required:
   - port
 type: object
+sleep:
+  description: Sleep represents the duration that the container should sleep before being terminated.
+  properties:
+    seconds:
+      description: Seconds is the number of seconds to sleep.
+      format: int64
+      type: integer
+  required:
+    - seconds
+  type: object
 tcpSocket:
   description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
   properties:
@@ -1459,7 +1514,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -1484,6 +1539,16 @@
 required:
   - port
 type: object
+sleep:
+  description: Sleep represents the duration that the container should sleep before being terminated.
+  properties:
+    seconds:
+      description: Seconds is the number of seconds to sleep.
+      format: int64
+      type: integer
+  required:
+    - seconds
+  type: object
 tcpSocket:
   description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
   properties:
@@ -1518,7 +1583,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -1542,7 +1607,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -1654,7 +1719,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -1678,7 +1743,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -1739,11 +1804,28 @@
     format: int32
     type: integer
   type: object
+resizePolicy:
+  description: Resources resize policy for the container.
+  items:
+    description: ContainerResizePolicy represents resource resize policy for the container.
+    properties:
+      resourceName:
+        description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.'
+        type: string
+      restartPolicy:
+        description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.
+        type: string
+    required:
+      - resourceName
+      - restartPolicy
+    type: object
+  type: array
+  x-kubernetes-list-type: atomic
 resources:
   description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   properties:
     claims:
-      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
+      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
      items:
         description: ResourceClaim references one entry in PodSpec.ResourceClaims.
         properties:
@@ -1773,9 +1855,12 @@
       - type: string
     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
     x-kubernetes-int-or-string: true
-  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   type: object
 type: object
+restartPolicy:
+  description: 'RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.'
+  type: string
 securityContext:
   description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
   properties:
@@ -1838,7 +1923,7 @@
 description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.
 properties:
   localhostProfile:
-    description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".
+    description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.
     type: string
   type:
     description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."
@@ -1856,7 +1941,7 @@
 description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
 type: string
 hostProcess:
-  description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
+  description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
   type: boolean
 runAsUserName:
   description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
@@ -1880,7 +1965,7 @@
   format: int32
   type: integer
 grpc:
-  description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.
+  description: GRPC specifies an action involving a GRPC port.
   properties:
     port:
       description: Port number of the gRPC service. Number must be in the range 1 to 65535.
@@ -1904,7 +1989,7 @@
 description: HTTPHeader describes a custom header to be used in HTTP probes
 properties:
   name:
-    description: The header field name
+    description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
     type: string
   value:
     description: The header field value
@@ -2377,7 +2462,7 @@
 anyOf:
   - type: integer
   - type: string
-description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir'
+description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
 pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
 x-kubernetes-int-or-string: true
 type: object
@@ -2437,21 +2522,6 @@
 resources:
   description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
   properties:
-    claims:
-      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
-      items:
-        description: ResourceClaim references one entry in PodSpec.ResourceClaims.
-        properties:
-          name:
-            description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
-            type: string
-        required:
-          - name
-        type: object
-      type: array
-      x-kubernetes-list-map-keys:
-        - name
-      x-kubernetes-list-type: map
     limits:
       additionalProperties:
         anyOf:
@@ -2468,7 +2538,7 @@
       - type: string
     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
     x-kubernetes-int-or-string: true
-  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+  description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
   type: object
 type: object
 selector:
@@ -2505,6 +2575,9 @@
 storageClassName:
   description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
   type: string
+volumeAttributesClassName:
+  description: 'volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it''s not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.'
+  type: string
 volumeMode:
   description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
   type: string
@@ -2759,6 +2832,55 @@
 items:
   description: Projection that may be projected along with other supported volume types
   properties:
+    clusterTrustBundle:
+      description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. \n Alpha, gated by the ClusterTrustBundleProjection feature gate. \n ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. \n Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time."
+      properties:
+        labelSelector:
+          description: Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything".
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
name:
|
||||
description: Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.
|
||||
type: string
|
||||
optional:
|
||||
description: If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.
|
||||
type: boolean
|
||||
path:
|
||||
description: Relative path from the volume root to write the bundle.
|
||||
type: string
|
||||
signerName:
|
||||
description: Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
type: object
|
||||
configMap:
|
||||
description: configMap information about the configMap data to project
|
||||
properties:
|
||||
@@ -3216,7 +3338,7 @@ spec:
|
||||
description: Required. A pod affinity term, associated with the corresponding weight.
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources, in this case pods.
|
||||
description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
@@ -3246,6 +3368,18 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
mismatchLabelKeys:
|
||||
description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
namespaceSelector:
|
||||
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
|
||||
properties:
|
||||
@@ -3303,7 +3437,7 @@ spec:
|
||||
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources, in this case pods.
|
||||
description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
@@ -3333,6 +3467,18 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
mismatchLabelKeys:
|
||||
description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
namespaceSelector:
|
||||
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
|
||||
properties:
|
||||
@@ -3389,7 +3535,7 @@ spec:
|
||||
description: Required. A pod affinity term, associated with the corresponding weight.
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources, in this case pods.
|
||||
description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
@@ -3419,6 +3565,18 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
mismatchLabelKeys:
|
||||
description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
namespaceSelector:
|
||||
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
|
||||
properties:
|
||||
@@ -3476,7 +3634,7 @@ spec:
|
||||
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources, in this case pods.
|
||||
description: A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
@@ -3506,6 +3664,18 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
mismatchLabelKeys:
|
||||
description: MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
namespaceSelector:
|
||||
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
|
||||
properties:
|
||||
@@ -3612,7 +3782,7 @@ spec:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
@@ -3642,14 +3812,14 @@ spec:
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
type: object
|
||||
type: object
|
||||
controllerManager:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
@@ -3679,14 +3849,14 @@ spec:
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
type: object
|
||||
type: object
|
||||
kine:
|
||||
description: Define the kine container resources. Available only if Kamaji is running using Kine as backing storage.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
@@ -3716,14 +3886,14 @@ spec:
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
type: object
|
||||
type: object
|
||||
scheduler:
|
||||
description: ResourceRequirements describes the compute resource requirements.
|
||||
properties:
|
||||
claims:
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable."
|
||||
description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
|
||||
items:
|
||||
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
properties:
|
||||
@@ -3753,7 +3923,7 @@ spec:
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
@@ -3848,7 +4018,7 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
matchLabelKeys:
|
||||
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
|
||||
description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)."
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@@ -4214,6 +4384,9 @@ spec:
|
||||
ip:
|
||||
description: IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
|
||||
type: string
|
||||
ipMode:
|
||||
description: IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to "VIP" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to "Proxy" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.
|
||||
type: string
|
||||
ports:
|
||||
description: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it
|
||||
items:
|
||||
@@ -4519,36 +4692,36 @@ spec:
|
||||
description: KubernetesIngressStatus defines the status for the Tenant Control Plane Ingress in the management cluster.
|
||||
properties:
|
||||
loadBalancer:
|
||||
description: LoadBalancer contains the current status of the load-balancer.
|
||||
description: loadBalancer contains the current status of the load-balancer.
|
||||
properties:
|
||||
ingress:
|
||||
description: Ingress is a list containing ingress points for the load-balancer.
|
||||
description: ingress is a list containing ingress points for the load-balancer.
|
||||
items:
|
||||
description: IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
|
||||
properties:
|
||||
hostname:
|
||||
description: Hostname is set for load-balancer ingress points that are DNS based.
|
||||
description: hostname is set for load-balancer ingress points that are DNS based.
|
||||
type: string
|
||||
ip:
|
||||
description: IP is set for load-balancer ingress points that are IP based.
|
||||
description: ip is set for load-balancer ingress points that are IP based.
|
||||
type: string
|
||||
ports:
|
||||
description: Ports provides information about the ports exposed by this LoadBalancer.
|
||||
description: ports provides information about the ports exposed by this LoadBalancer.
|
||||
items:
|
||||
description: IngressPortStatus represents the error condition of a service port
|
||||
properties:
|
||||
error:
|
||||
description: 'Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. --- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)'
|
||||
description: 'error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. --- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)'
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
port:
|
||||
description: Port is the port number of the ingress port.
|
||||
description: port is the port number of the ingress port.
|
||||
format: int32
|
||||
type: integer
|
||||
protocol:
|
||||
default: TCP
|
||||
description: 'Protocol is the protocol of the ingress port. The supported values are: "TCP", "UDP", "SCTP"'
|
||||
description: 'protocol is the protocol of the ingress port. The supported values are: "TCP", "UDP", "SCTP"'
|
||||
type: string
|
||||
required:
|
||||
- port
|
||||
@@ -4633,6 +4806,9 @@ spec:
|
||||
ip:
|
||||
description: IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
|
||||
type: string
|
||||
ipMode:
|
||||
description: IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to "VIP" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to "Proxy" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.
|
||||
type: string
|
||||
ports:
|
||||
description: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it
|
||||
items:
|
||||
@@ -4744,12 +4920,16 @@ spec:
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-controller-manager
|
||||
namespace: kamaji-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-leader-election-role
|
||||
namespace: kamaji-system
|
||||
rules:
|
||||
@@ -4788,6 +4968,8 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-manager-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -4910,6 +5092,8 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-metrics-reader
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
@@ -4920,6 +5104,8 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-proxy-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -4938,6 +5124,8 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-leader-election-rolebinding
|
||||
namespace: kamaji-system
|
||||
roleRef:
|
||||
@@ -4952,6 +5140,8 @@ subjects:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-manager-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -4965,6 +5155,8 @@ subjects:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-proxy-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -4991,6 +5183,8 @@ data:
|
||||
resourceName: 799b98bc.clastix.io
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-manager-config
|
||||
namespace: kamaji-system
|
||||
---
|
||||
@@ -4998,6 +5192,7 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
name: kamaji-controller-manager-metrics-service
|
||||
namespace: kamaji-system
|
||||
@@ -5008,6 +5203,7 @@ spec:
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
---
|
||||
apiVersion: v1
|
||||
@@ -5020,6 +5216,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
app.kubernetes.io/name: service
|
||||
app.kubernetes.io/part-of: operator
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-webhook-service
|
||||
namespace: kamaji-system
|
||||
spec:
|
||||
@@ -5028,12 +5225,14 @@ spec:
|
||||
protocol: TCP
|
||||
targetPort: 9443
|
||||
selector:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
name: kamaji-controller-manager
|
||||
namespace: kamaji-system
|
||||
@@ -5041,16 +5240,19 @@ spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- manager
|
||||
- --leader-elect
|
||||
- --datastore=kamaji-etcd
|
||||
command:
|
||||
- /kamaji
|
||||
env:
|
||||
@@ -5062,7 +5264,7 @@ spec:
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.serviceAccountName
|
||||
image: clastix/kamaji:v0.3.2
|
||||
image: clastix/kamaji:v0.4.0
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -5117,6 +5319,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
app.kubernetes.io/name: certificate
|
||||
app.kubernetes.io/part-of: operator
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-serving-cert
|
||||
namespace: kamaji-system
|
||||
spec:
|
||||
@@ -5138,6 +5341,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
app.kubernetes.io/name: issuer
|
||||
app.kubernetes.io/part-of: operator
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-selfsigned-issuer
|
||||
namespace: kamaji-system
|
||||
spec:
|
||||
@@ -5146,6 +5350,8 @@ spec:
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-etcd
|
||||
namespace: kamaji-system
|
||||
spec:
|
||||
@@ -5183,6 +5389,7 @@ apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
control-plane: controller-manager
|
||||
name: kamaji-controller-manager-metrics-monitor
|
||||
namespace: kamaji-system
|
||||
@@ -5210,6 +5417,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
app.kubernetes.io/name: mutatingwebhookconfiguration
|
||||
app.kubernetes.io/part-of: operator
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
@@ -5245,6 +5453,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
app.kubernetes.io/name: validatingwebhookconfiguration
|
||||
app.kubernetes.io/part-of: operator
|
||||
cluster.x-k8s.io/provider: kamaji-core
|
||||
name: kamaji-validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
|
||||
@@ -13,4 +13,4 @@ kind: Kustomization
|
||||
images:
|
||||
- name: controller
|
||||
newName: clastix/kamaji
|
||||
newTag: v0.3.2
|
||||
newTag: v0.4.0
|
||||
|
||||
@@ -30,6 +30,7 @@ spec:
|
||||
args:
|
||||
- manager
|
||||
- --leader-elect
|
||||
- --datastore=kamaji-etcd
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
|
||||
10
config/metadata.yaml
Normal file
@@ -0,0 +1,10 @@
# maps release series of major.minor to cluster-api contract version
# the contract version may change between minor or major versions, but *not*
# between patch versions.
#
# update this file only when a new major or minor version is released
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
releaseSeries:
  - major: 0
    minor: 3
    contract: v1beta1
@@ -2,6 +2,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: k8s-126
|
||||
labels:
|
||||
tenant.clastix.io: k8s-126
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: additionalcontainers
|
||||
labels:
|
||||
tenant.clastix.io: additionalcontainers
|
||||
spec:
|
||||
dataStore: postgresql-bronze
|
||||
controlPlane:
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: additional-volumes
|
||||
labels:
|
||||
tenant.clastix.io: additional-volumes
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: kine
|
||||
labels:
|
||||
tenant.clastix.io: kine
|
||||
spec:
|
||||
addons:
|
||||
coreDNS: {}
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: konnectivity-addon
|
||||
labels:
|
||||
tenant.clastix.io: konnectivity-addon
|
||||
spec:
|
||||
deployment:
|
||||
replicas: 2
|
||||
|
||||
10
controllers/cert_channel.go
Normal file
@@ -0,0 +1,10 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package controllers

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
)

type CertificateChannel chan event.GenericEvent
160
controllers/certificate_lifecycle_controller.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/constants"
|
||||
"github.com/clastix/kamaji/internal/crypto"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
)
|
||||
|
||||
type CertificateLifecycle struct {
|
||||
Channel CertificateChannel
|
||||
client client.Client
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
logger.Info("starting CertificateLifecycle handling")
|
||||
|
||||
secret := corev1.Secret{}
|
||||
if err := s.client.Get(ctx, request.NamespacedName, &secret); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
logger.Info("resource may have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
checkType, ok := secret.GetLabels()[constants.ControllerLabelResource]
|
||||
if !ok {
|
||||
logger.Info("missing controller label, shouldn't happen")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var crt *x509.Certificate
|
||||
var err error
|
||||
|
||||
switch checkType {
|
||||
case "x509":
|
||||
crt, err = s.extractCertificateFromBareSecret(secret)
|
||||
case "kubeconfig":
|
||||
crt, err = s.extractCertificateFromKubeconfig(secret)
|
||||
default:
|
||||
err = fmt.Errorf("unsupported strategy, %s", checkType)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.Error(err, "skipping reconciliation")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
deadline := time.Now().AddDate(0, 0, 1)
|
||||
|
||||
if deadline.After(crt.NotAfter) {
|
||||
logger.Info("certificate near expiration, must be rotated")
|
||||
|
||||
s.Channel <- event.GenericEvent{Object: &kamajiv1alpha1.TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secret.GetOwnerReferences()[0].Name,
|
||||
Namespace: secret.Namespace,
|
||||
},
|
||||
}}
|
||||
|
||||
logger.Info("certificate rotation triggered")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
after := crt.NotAfter.Sub(deadline)
|
||||
|
||||
logger.Info("certificate is still valid, enqueuing back", "after", after.String())
|
||||
|
||||
return reconcile.Result{Requeue: true, RequeueAfter: after}, nil
|
||||
}
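// Illustrative note (not part of the changeset): the deadline above is always
// "now + 1 day", so a certificate whose NotAfter is, say, 30 days away is left
// untouched and the request is requeued after crt.NotAfter.Sub(deadline),
// roughly 29 days; a certificate expiring within the next 24 hours instead
// pushes a GenericEvent for the owning TenantControlPlane onto the channel.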
|
||||
func (s *CertificateLifecycle) extractCertificateFromBareSecret(secret corev1.Secret) (*x509.Certificate, error) {
|
||||
var crt *x509.Certificate
|
||||
var err error
|
||||
|
||||
for _, v := range secret.Data {
|
||||
if crt, err = crypto.ParseCertificateBytes(v); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if crt == nil {
|
||||
return nil, fmt.Errorf("none of the provided keys is containing a valid x509 certificate")
|
||||
}
|
||||
|
||||
return crt, nil
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) extractCertificateFromKubeconfig(secret corev1.Secret) (*x509.Certificate, error) {
|
||||
var kc *clientcmdapiv1.Config
|
||||
var err error
|
||||
|
||||
for k := range secret.Data {
|
||||
if kc, err = utilities.DecodeKubeconfig(secret, k); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if kc == nil {
|
||||
return nil, fmt.Errorf("none of the provided keys is containing a valid kubeconfig")
|
||||
}
|
||||
|
||||
crt, err := crypto.ParseCertificateBytes(kc.AuthInfos[0].AuthInfo.ClientCertificateData)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot parse kubeconfig certificate bytes")
|
||||
}
|
||||
|
||||
return crt, nil
|
||||
}
|
||||
|
||||
func (s *CertificateLifecycle) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
s.client = mgr.GetClient()
|
||||
|
||||
supportedStrategies := sets.New[string]("x509", "kubeconfig")
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Secret{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
labels := object.GetLabels()
|
||||
|
||||
if labels == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
value, ok := labels[constants.ControllerLabelResource]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return supportedStrategies.Has(value)
|
||||
}))).
|
||||
Complete(s)
|
||||
}
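The controller above only produces events; the same CertificateChannel instance still has to reach the TenantControlPlane reconciler that consumes them (see its CertificateChan field further down). A minimal wiring sketch, assuming a standard controller-runtime entrypoint; everything except the channel plumbing (manager options, error handling, the remaining reconciler fields) is illustrative and not taken from this changeset:

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/clastix/kamaji/controllers"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// Both controllers must share the same channel: the lifecycle controller
	// writes GenericEvents, the TenantControlPlane reconciler watches them.
	certChan := make(controllers.CertificateChannel)

	if err := (&controllers.CertificateLifecycle{Channel: certChan}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := (&controllers.TenantControlPlaneReconciler{
		CertificateChan: certChan,
		// remaining fields (client, config, Kamaji settings, ...) elided.
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}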
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
type DataStore struct {
|
||||
client client.Client
|
||||
Client client.Client
|
||||
// TenantControlPlaneTrigger is the channel used to communicate across the controllers:
|
||||
// if a Data Source is updated we have to be sure that the reconciliation of the certificates content
|
||||
// for each Tenant Control Plane is put in place properly.
|
||||
@@ -39,7 +39,7 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
ds := &kamajiv1alpha1.DataStore{}
|
||||
if err := r.client.Get(ctx, request.NamespacedName, ds); err != nil {
|
||||
if err := r.Client.Get(ctx, request.NamespacedName, ds); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
@@ -51,7 +51,7 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
|
||||
tcpList := kamajiv1alpha1.TenantControlPlaneList{}
|
||||
|
||||
if err := r.client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
if err := r.Client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
|
||||
}); err != nil {
|
||||
log.Error(err, "cannot retrieve list of the Tenant Control Plane using the following instance")
|
||||
@@ -66,7 +66,7 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
|
||||
ds.Status.UsedBy = tcpSets.List()
|
||||
|
||||
if err := r.client.Status().Update(ctx, ds); err != nil {
|
||||
if err := r.Client.Status().Update(ctx, ds); err != nil {
|
||||
log.Error(err, "cannot update the status for the given instance")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
@@ -81,12 +81,6 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *DataStore) InjectClient(client client.Client) error {
|
||||
r.client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
enqueueFn := func(tcp *kamajiv1alpha1.TenantControlPlane, limitingInterface workqueue.RateLimitingInterface) {
|
||||
if dataStoreName := tcp.Status.Storage.DataStoreName; len(dataStoreName) > 0 {
|
||||
@@ -102,15 +96,15 @@ func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(
|
||||
predicate.ResourceVersionChangedPredicate{},
|
||||
)).
|
||||
Watches(&source.Kind{Type: &kamajiv1alpha1.TenantControlPlane{}}, handler.Funcs{
|
||||
CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &kamajiv1alpha1.TenantControlPlane{}), handler.Funcs{
|
||||
CreateFunc: func(_ context.Context, createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(createEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
UpdateFunc: func(_ context.Context, updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(updateEvent.ObjectOld.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
enqueueFn(updateEvent.ObjectNew.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
DeleteFunc: func(_ context.Context, deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(deleteEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
}).
@@ -5,6 +5,7 @@ package finalizers
|
||||
|
||||
const (
|
||||
// DatastoreFinalizer is using a wrong name, since it's related to the underlying datastore.
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
DatastoreSecretFinalizer = "finalizer.kamaji.clastix.io/datastore-secret"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
)
|
||||
|
||||
@@ -40,6 +40,7 @@ type GroupDeletableResourceBuilderConfiguration struct {
|
||||
tcpReconcilerConfig TenantControlPlaneReconcilerConfig
|
||||
tenantControlPlane kamajiv1alpha1.TenantControlPlane
|
||||
connection datastore.Connection
|
||||
dataStore kamajiv1alpha1.DataStore
|
||||
}
|
||||
|
||||
// GetResources returns a list of resources that will be used to provide tenant control planes
|
||||
@@ -60,6 +61,11 @@ func GetDeletableResources(tcp *kamajiv1alpha1.TenantControlPlane, config GroupD
|
||||
Client: config.client,
|
||||
Connection: config.connection,
|
||||
})
|
||||
res = append(res, &ds.Config{
|
||||
Client: config.client,
|
||||
ConnString: config.connection.GetConnectionString(),
|
||||
DataStore: config.dataStore,
|
||||
})
|
||||
}
|
||||
|
||||
return res
|
||||
@@ -176,6 +182,12 @@ func getKubeconfigResources(c client.Client, tcpReconcilerConfig TenantControlPl
|
||||
KubeConfigFileName: resources.AdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "admin-kubeconfig",
|
||||
Client: c,
|
||||
KubeConfigFileName: resources.SuperAdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "controller-manager-kubeconfig",
|
||||
Client: c,
|
||||
|
||||
@@ -35,7 +35,7 @@ type CoreDNS struct {
|
||||
TriggerChannel chan event.GenericEvent
|
||||
}
|
||||
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := c.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
c.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
@@ -79,7 +79,7 @@ func (c *CoreDNS) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.CoreDNSClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&rbacv1.ClusterRole{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&corev1.Service{}).
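The same mechanical migration appears in this and the following hunks: Watches(&source.Kind{...}) and Watches(&source.Channel{...}) become WatchesRawSource, and every handler callback gains a leading context.Context. A compact, self-contained sketch of the pattern, assuming controller-runtime v0.16-style APIs (the ConfigMap target and the no-op reconciler are placeholders, not Kamaji code):

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// noop satisfies reconcile.Reconciler just to make the sketch compile.
type noop struct{}

func (noop) Reconcile(context.Context, reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}

	trigger := make(chan event.GenericEvent)

	// Raw sources (channels, source.Kind) now go through WatchesRawSource.
	err = ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		WatchesRawSource(&source.Channel{Source: trigger}, &handler.EnqueueRequestForObject{}).
		Complete(noop{})
	if err != nil {
		panic(err)
	}
}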
@@ -80,7 +80,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&appsv1.DaemonSet{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace
|
||||
}))).
|
||||
Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &corev1.ServiceAccount{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -94,7 +94,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Kind{Type: &v1.ClusterRoleBinding{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &v1.ClusterRoleBinding{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.CertCommonName {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -107,6 +107,6 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -67,6 +67,6 @@ func (k *KubeadmPhase) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(k.Phase.GetWatchedObject(), builder.WithPredicates(predicate.NewPredicateFuncs(k.Phase.GetPredicateFunc()))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ func (k *KubeProxy) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.KubeProxyClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&rbacv1.Role{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -84,7 +84,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "leases.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -128,7 +128,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "catchall.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -187,7 +187,7 @@ func (m *Migrate) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return object.GetName() == vwc.GetName()
|
||||
}))).
|
||||
Watches(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(m)
|
||||
}
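The other change in this file swaps the deprecated k8s.io/utils/pointer helpers for the generics-based k8s.io/utils/ptr package, keeping the old import alias. A tiny stand-alone sketch of the replacement; the URL is just an example value mirroring the webhook address built above:

package main

import (
	"fmt"

	pointer "k8s.io/utils/ptr"
)

func main() {
	// pointer.To replaces the per-type helpers such as pointer.String and
	// pointer.Int32: it returns a *T for any value via generics.
	url := pointer.To("https://kamaji-webhook-service.kamaji-system.svc:443/migrate")
	port := pointer.To[int32](443)

	fmt.Println(*url, *port)
}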
@@ -12,13 +12,13 @@ import (
|
||||
"k8s.io/client-go/util/retry"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
@@ -175,10 +175,12 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
}()
|
||||
|
||||
mgr, err := controllerruntime.NewManager(tcpRest, controllerruntime.Options{
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
MetricsBindAddress: "0",
|
||||
NewClient: func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: "0",
|
||||
},
|
||||
NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
|
||||
return client.New(config, client.Options{
|
||||
Scheme: m.client.Scheme(),
|
||||
})
|
||||
@@ -256,6 +258,17 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
if err = bootstrapToken.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
kubeadmRbac := &controllers.KubeadmPhase{
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Phase: &resources.KubeadmPhase{
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseClusterAdminRBAC,
|
||||
},
|
||||
}
|
||||
if err = kubeadmRbac.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Starting the manager
|
||||
go func() {
|
||||
if err = mgr.Start(tcpCtx); err != nil {
|
||||
@@ -289,7 +302,7 @@ func (m *Manager) SetupWithManager(mgr manager.Manager) error {
|
||||
m.sootMap = make(map[string]sootItem)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
For(&kamajiv1alpha1.TenantControlPlane{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
obj := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
|
||||
// status is required to understand if we have to start or stop the soot manager
|
||||
|
||||
@@ -50,6 +50,10 @@ type TenantControlPlaneReconciler struct {
|
||||
KamajiService string
|
||||
KamajiMigrateImage string
|
||||
MaxConcurrentReconciles int
|
||||
// CertificateChan is the channel used by the CertificateLifecycleController that is checking for
|
||||
// certificates and kubeconfig user certs validity: a generic event for the given TCP will be triggered
|
||||
// once the validity threshold for the given certificate is reached.
|
||||
CertificateChan CertificateChannel
|
||||
|
||||
clock mutex.Clock
|
||||
}
|
||||
@@ -141,6 +145,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
tcpReconcilerConfig: r.Config,
|
||||
tenantControlPlane: *tenantControlPlane,
|
||||
connection: dsConnection,
|
||||
dataStore: *ds,
|
||||
}
|
||||
|
||||
for _, resource := range GetDeletableResources(tenantControlPlane, groupDeletableResourceBuilderConfiguration) {
|
||||
@@ -223,7 +228,15 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
r.clock = clock.RealClock{}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(&source.Channel{Source: r.CertificateChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
Name: genericEvent.Object.GetName(),
|
||||
},
|
||||
})
|
||||
}}).
|
||||
WatchesRawSource(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
@@ -237,7 +250,7 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
Owns(&appsv1.Deployment{}).
|
||||
Owns(&corev1.Service{}).
|
||||
Owns(&networkingv1.Ingress{}).
|
||||
Watches(&source.Kind{Type: &batchv1.Job{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &batchv1.Job{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
labels := object.GetLabels()
|
||||
|
||||
name, namespace := labels["tcp.kamaji.clastix.io/name"], labels["tcp.kamaji.clastix.io/namespace"]
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
KUBERNETES_VERSION=$1; shift
|
||||
HOSTS=("$@")
|
||||
|
||||
# Install `containerd` as container runtime.
|
||||
cat << EOF | tee containerd.conf
|
||||
overlay
|
||||
br_netfilter
|
||||
EOF
|
||||
|
||||
cat << EOF | tee 99-kubernetes-cri.conf
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
EOF
|
||||
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update && sudo apt install -y containerd'
|
||||
ssh ${USER}@${HOST} -t 'sudo mkdir -p /etc/containerd'
|
||||
ssh ${USER}@${HOST} -t 'containerd config default | sed -e "s#SystemdCgroup = false#SystemdCgroup = true#g" | sudo tee -a /etc/containerd/config.toml'
|
||||
ssh ${USER}@${HOST} -t 'sudo systemctl restart containerd && sudo systemctl enable containerd'
|
||||
scp containerd.conf ${USER}@${HOST}:
|
||||
ssh ${USER}@${HOST} -t 'sudo chown -R root:root containerd.conf && sudo mv containerd.conf /etc/modules-load.d/containerd.conf'
|
||||
ssh ${USER}@${HOST} -t 'sudo modprobe overlay && sudo modprobe br_netfilter'
|
||||
scp 99-kubernetes-cri.conf ${USER}@${HOST}:
|
||||
ssh ${USER}@${HOST} -t 'sudo chown -R root:root 99-kubernetes-cri.conf && sudo mv 99-kubernetes-cri.conf /etc/sysctl.d/99-kubernetes-cri.conf'
|
||||
ssh ${USER}@${HOST} -t 'sudo sysctl --system'
|
||||
done
|
||||
|
||||
rm -f containerd.conf 99-kubernetes-cri.conf
|
||||
|
||||
# Install `kubectl`, `kubelet`, and `kubeadm` in the desired version.
|
||||
|
||||
INSTALL_KUBERNETES="sudo apt install -y kubelet=${KUBERNETES_VERSION}-00 kubeadm=${KUBERNETES_VERSION}-00 kubectl=${KUBERNETES_VERSION}-00 --allow-downgrades --allow-change-held-packages"
|
||||
|
||||
for i in "${!HOSTS[@]}"; do
|
||||
HOST=${HOSTS[$i]}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update'
|
||||
ssh ${USER}@${HOST} -t 'sudo apt install -y apt-transport-https ca-certificates curl'
|
||||
ssh ${USER}@${HOST} -t 'sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg'
|
||||
ssh ${USER}@${HOST} -t 'echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list'
|
||||
ssh ${USER}@${HOST} -t 'sudo apt update'
|
||||
ssh ${USER}@${HOST} -t ${INSTALL_KUBERNETES}
|
||||
ssh ${USER}@${HOST} -t 'sudo apt-mark hold kubelet kubeadm kubectl'
|
||||
done
|
||||
@@ -1,32 +0,0 @@
|
||||
#cloud-config
|
||||
package_upgrade: true
|
||||
packages:
|
||||
- containerd
|
||||
- apt-transport-https
|
||||
- ca-certificates
|
||||
- curl
|
||||
write_files:
|
||||
- owner: root:root
|
||||
path: /etc/modules-load.d/containerd.conf
|
||||
content: |
|
||||
overlay
|
||||
br_netfilter
|
||||
- owner: root:root
|
||||
path: /etc/sysctl.d/99-kubernetes-cri.conf
|
||||
content: |
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
runcmd:
|
||||
- sudo modprobe overlay
|
||||
- sudo modprobe br_netfilter
|
||||
- sudo sysctl --system
|
||||
- sudo mkdir -p /etc/containerd
|
||||
- containerd config default | sed -e 's#SystemdCgroup = false#SystemdCgroup = true#g' | sudo tee -a /etc/containerd/config.toml
|
||||
- sudo systemctl restart containerd
|
||||
- sudo systemctl enable containerd
|
||||
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
|
||||
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
|
||||
- sudo apt update
|
||||
- sudo apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
|
||||
- sudo apt-mark hold kubelet kubeadm kubectl containerd
|
||||
@@ -1,41 +1,41 @@
|
||||
# Concepts
|
||||
|
||||
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
|
||||
**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_.
|
||||
|
||||
These are requirements of the design behind Kamaji:
|
||||
|
||||
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
|
||||
- Communication between different _“tenant clusters”_ is not allowed.
|
||||
- Communication between the _“Management Cluster”_ and a _“Tenant Cluster”_ is unidirectional. The _“Management Cluster”_ manages a _“Tenant Cluster”_, but a _“Tenant Cluster”_ has no awareness of the _“Management Cluster”_.
|
||||
- Communication between different _“Tenant Clusters”_ is not allowed.
|
||||
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
|
||||
|
||||
Goals and scope may vary as the project evolves.
|
||||
|
||||
## Tenant Control Plane
|
||||
Kamaji is special because the Control Planes of the _“tenant cluster”_ are regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
|
||||
Kamaji is special because the Control Planes of the _“Tenant Clusters”_ are regular pods running in a namespace of the _“Management Cluster”_ instead of dedicated machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
|
||||
|
||||
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on the metrics is available. A Service is used to espose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used, `NodePort` and `ClusterIP` are other viable options, depending on the case.
|
||||
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“Management Cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` are other viable options, depending on the case.
|
||||
|
||||
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` for short.
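As an illustration, a Tenant Control Plane can be handled like any other namespaced resource through the short name; the namespace and resource names below are placeholders:

```bash
# List all Tenant Control Planes across namespaces using the `tcp` short name.
kubectl get tcp -A

# Inspect the declared state of a single Tenant Control Plane.
kubectl -n tenants get tcp tenant-00 -o yaml
```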
|
||||
|
||||
All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
|
||||
All the _“Tenant Clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
|
||||
|
||||
## Tenant worker nodes
|
||||
|
||||
And what about the tenant worker nodes?
|
||||
They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the APIs server of the Tenant Control Plane.
|
||||
Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how to add another tenant cluster to Kamaji?
|
||||
As you could expect, you have just deploys a new Tenant Control Plane in one of the _“admin cluster”_ namespace, and then joins the tenant worker nodes to it.
|
||||
Kamaji's goal is to manage the lifecycle of hundreds of these _“Tenant Clusters”_, not just one. So how do you add another Tenant Cluster to Kamaji?
|
||||
As you might expect, you just deploy a new Tenant Control Plane in one of the _“Management Cluster”_ namespaces, and then join the tenant worker nodes to it.
|
||||
|
||||
A [Cluster API ControlPlane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been released, offering a Cluster API-native declarative lifecycle by automating the worker nodes' join.
|
||||
|
||||
## Datastores
|
||||
Putting the Tenant Control Plane in a pod is the easiest part. Also, we have to make sure each tenant cluster saves the state to be able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the admin cluster, you can deploy one or multi-tenant `etcd` to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing multiple datastores. By sharing the datastore between multiple tenants, the resiliency is still guaranteed and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that you have to operate external datastores, in addition to `etcd` of the _“admin cluster”_ and manage the access to be sure that each _“tenant cluster”_ uses only its data.
|
||||
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each Tenant Cluster saves its state, so it is able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the Management Cluster, you can deploy one or more multi-tenant `etcd` clusters to save the state of multiple Tenant Clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach to managing multiple datastores. By sharing a datastore between multiple tenants, resiliency is still guaranteed and the pod count remains under control, so it meets the main goals of resiliency and cost optimization. The trade-off here is that you have to operate external datastores, in addition to the `etcd` of the _“Management Cluster”_, and manage access to be sure that each _“Tenant Cluster”_ uses only its own data.
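For instance, a quick way to see the registered datastores and which one a given Tenant Control Plane uses is sketched below; the namespace and resource names are placeholders:

```bash
# List the DataStore resources available in the Management Cluster.
kubectl get datastores.kamaji.clastix.io

# Show which datastore a Tenant Control Plane is bound to.
kubectl -n tenants get tcp tenant-00 -o jsonpath='{.spec.dataStore}'
```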
|
||||
|
||||
### Other storage drivers
|
||||
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as datastore for _“tenant clusters”_.
|
||||
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple Tenant Clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“Tenant Clusters”_.
|
||||
|
||||
### Pooling
|
||||
By default, Kamaji is expecting to persist all the _“tenant clusters”_ data in a unique datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that could have different resources assigned or a different tiering. Pooling of multiple datastore is an option you can leverage for a very large set of _“tenant clusters”_ so you can distribute the load properly. As future improvements, we have a _datastore scheduler_ feature in roadmap so that Kamaji itself can assign automatically a _“tenant cluster”_ to the best datastore in the pool.
|
||||
By default, Kamaji expects to persist all the _“Tenant Clusters”_ data in a single datastore, which can be backed by different drivers. However, you can pick a different datastore for a specific set of _“Tenant Clusters”_ that should have different resources assigned or a different tiering. Pooling multiple datastores is an option you can leverage for a very large set of _“Tenant Clusters”_, so you can distribute the load properly. As a future improvement, we have a _datastore scheduler_ feature on the roadmap so that Kamaji itself can automatically assign a _“Tenant Cluster”_ to the best datastore in the pool.
|
||||
|
||||
### Migration
|
||||
In order to simplify Day-2 operations and reduce the operational burden, Kamaji provides the capability to live-migrate data from one datastore to another of the same driver, without manual and error-prone backup and restore operations.
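As a minimal sketch of how such a migration is triggered, the target datastore is simply set on the `TenantControlPlane` resource; the namespace, resource, and datastore names below are placeholders:

```bash
# Move a Tenant Control Plane to another datastore of the same driver.
# Kamaji reacts to the change of spec.dataStore and live-migrates the data.
kubectl -n tenants patch tcp tenant-00 \
  --type=merge \
  -p '{"spec":{"dataStore":"dedicated"}}'
```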
|
||||
|
||||
@@ -13,7 +13,7 @@ The guide requires:
|
||||
## Summary
|
||||
|
||||
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
|
||||
* [Access Admin cluster](#access-admin-cluster)
|
||||
* [Access Management Cluster](#access-management-cluster)
|
||||
* [Install Cert Manager](#install-cert-manager)
|
||||
* [Install Kamaji controller](#install-kamaji-controller)
|
||||
* [Create Tenant Cluster](#create-tenant-cluster)
|
||||
@@ -27,15 +27,15 @@ git clone https://github.com/clastix/kamaji
|
||||
cd kamaji/deploy
|
||||
```
|
||||
|
||||
We assume you have installed on the bootstrap machine:
|
||||
We assume you have the following tools installed on the bootstrap workstation:
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
|
||||
- [helm](https://helm.sh/docs/intro/install/)
|
||||
- [jq](https://stedolan.github.io/jq/)
|
||||
|
||||
## Access Admin cluster
|
||||
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as management cluster for all the Tenant clusters and hosts monitoring, logging, and governance of Kamaji setup, including all Tenant clusters.
|
||||
## Access Management Cluster
|
||||
In Kamaji, the Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant Clusters, as it hosts monitoring, logging, and governance of the Kamaji setup, including all Tenant Clusters.
|
||||
|
||||
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your environment:
|
||||
|
||||
@@ -43,14 +43,14 @@ Throughout the following instructions, shell variables are used to indicate valu
|
||||
source kamaji.env
|
||||
```
|
||||
|
||||
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
|
||||
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the Management Cluster should provide:
|
||||
|
||||
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
|
||||
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
|
||||
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or a Cloud based controller.
|
||||
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).
|
||||
|
||||
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster and check you can access:
|
||||
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Management Cluster and check you can access:
|
||||
|
||||
```bash
|
||||
kubectl cluster-info
|
||||
@@ -86,6 +86,21 @@ helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
!!! note "A managed datastore is highly recommended in production"
|
||||
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to set up a multi-tenant `etcd` running as a StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, such as a `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
Now you should end up with a working Kamaji instance, including the default `datastore`:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
etcd-0 1/1 Running 0 50s
|
||||
etcd-1 1/1 Running 0 60s
|
||||
etcd-2 1/1 Running 0 90s
|
||||
kamaji-7949578bfb-lj44p 1/1 Running 0 12s
|
||||
```
|
||||
|
||||
> A first installation could fail for several reasons, such as a missing `StorageClass`, or even a trivial `Ctrl+C` during the installation phase.
|
||||
>
|
||||
> See the [Cleanup](#cleanup) section before retrying an aborted installation.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
### Tenant Control Plane
|
||||
@@ -99,6 +114,8 @@ kind: TenantControlPlane
|
||||
metadata:
|
||||
name: ${TENANT_NAME}
|
||||
namespace: ${TENANT_NAMESPACE}
|
||||
labels:
|
||||
tenant.clastix.io: ${TENANT_NAME}
|
||||
spec:
|
||||
dataStore: default
|
||||
controlPlane:
|
||||
@@ -183,7 +200,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
|
||||
service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/TCP,8132:32713/TCP 2m20s
|
||||
```
|
||||
|
||||
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the admin cluster.
|
||||
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the Management Cluster.
|
||||
|
||||
The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP`, acting as `ControlPlaneEndpoint` for the worker nodes and for other clients such as `kubectl`. The `NodePort` and `ClusterIP` service types are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
|
||||
|
||||
@@ -220,7 +237,7 @@ Kubernetes control plane is running at https://192.168.32.240:6443
|
||||
CoreDNS is running at https://192.168.32.240:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
|
||||
```
|
||||
|
||||
Check out how the Tenant control Plane advertises itself to workloads:
|
||||
Check out how the Tenant Control Plane advertises itself to workloads:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get svc
|
||||
@@ -238,38 +255,34 @@ kubernetes 192.168.32.240:6443 18m
|
||||
|
||||
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
|
||||
|
||||
### Prepare worker nodes to join
|
||||
### Join worker nodes
|
||||
|
||||
Currently, Kamaji does not provide any helper for creation of tenant worker nodes.
|
||||
You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.
|
||||
The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane.
|
||||
|
||||
Kamaji is sticking to the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api) project contracts by providing a `ControlPlane` provider.
|
||||
Please, refer to the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) to learn more about it.
|
||||
Kamaji does not provide any helper for the creation of tenant worker nodes; instead, it leverages the [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Refer to the [Cluster API guide](guides/cluster-api.md) to learn more about supported providers.
|
||||
|
||||
You can use the provided helper script `/deploy/nodes-prerequisites.sh`, in order to install the dependencies on all the worker nodes:
|
||||
An alternative approach for joining nodes is to use the `kubeadm` command on each node. Follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to:
|
||||
|
||||
- Install `containerd` as container runtime
|
||||
- Install `crictl`, the command line for working with `containerd`
|
||||
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
|
||||
- install `containerd` as container runtime
|
||||
- install `crictl`, the command line for working with `containerd`
|
||||
- install `kubectl`, `kubelet`, and `kubeadm` in the desired version
|
||||
|
||||
!!! warning ""
|
||||
The provided script is just a facility: it assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
|
||||
|
||||
Run the script:
|
||||
After the installation is complete on all the nodes, open the command line on your Linux workstation and store the IP address of each node in an environment variable:
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
|
||||
WORKER0=<address of first node>
|
||||
WORKER1=<address of second node>
|
||||
WORKER2=<address of third node>
|
||||
```
|
||||
|
||||
### Join worker nodes
|
||||
The current approach for joining nodes is to use `kubeadm` and therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire command of joining in a variable:
|
||||
Store the join command in a variable:
|
||||
|
||||
```bash
|
||||
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
|
||||
|
||||
```
|
||||
|
||||
A bash loop will be used to join all the available nodes.
|
||||
Use a loop to log in to each node and run the join command:
|
||||
|
||||
```bash
|
||||
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
|
||||
@@ -279,6 +292,10 @@ for i in "${!HOSTS[@]}"; do
|
||||
done
|
||||
```
|
||||
|
||||
!!! tip "yaki"
|
||||
This manual process can be further automated to handle the node prerequisites and joining. See the [yaki](https://github.com/clastix/yaki) script, which you can modify for your preferred operating system and version. The provided script is just a facility: it assumes all worker nodes are running `Ubuntu 22.04`. Make sure to adapt it if you're using a different distribution.
|
||||
|
||||
|
||||
Checking the nodes:
|
||||
|
||||
```bash
|
||||
@@ -300,7 +317,7 @@ curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/ca
|
||||
|
||||
Before applying the Calico manifest, you can customize it as necessary according to your preferences.
|
||||
|
||||
Apply to the tenant cluster:
|
||||
Apply to the Tenant Cluster:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
|
||||
@@ -317,7 +334,8 @@ tenant-00-worker-02 Ready <none> 2m32s v1.25.0
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
Remove the worker nodes joined the tenant control plane
|
||||
### Delete a Tenant Cluster
|
||||
First, remove the worker nodes joined to the Tenant Control Plane:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
|
||||
@@ -335,10 +353,37 @@ for i in "${!HOSTS[@]}"; do
|
||||
done
|
||||
```
|
||||
|
||||
Delete the tenant control plane from kamaji
|
||||
Delete the Tenant Control Plane from Kamaji:
|
||||
|
||||
```bash
|
||||
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
|
||||
```
|
||||
|
||||
### Uninstall Kamaji
|
||||
Uninstall the Kamaji controller by removing the Helm release
|
||||
|
||||
```bash
|
||||
helm uninstall kamaji -n kamaji-system
|
||||
```
|
||||
|
||||
The default datastore installed three `etcd` replicas with persistent volumes, so remove the `PersistentVolumeClaims` resources:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system delete pvc --all
|
||||
```
|
||||
|
||||
Also delete the custom resources:
|
||||
|
||||
```bash
|
||||
kubectl delete crd tenantcontrolplanes.kamaji.clastix.io
|
||||
kubectl delete crd datastores.kamaji.clastix.io
|
||||
```
|
||||
|
||||
In case of a broken installation, manually remove the webhooks installed by Kamaji:
|
||||
|
||||
```bash
|
||||
kubectl delete ValidatingWebhookConfiguration kamaji-validating-webhook-configuration
|
||||
kubectl delete MutatingWebhookConfiguration kamaji-mutating-webhook-configuration
|
||||
```
|
||||
|
||||
That's all folks!
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# Use alternative datastores
|
||||
# Use Alternative Datastores
|
||||
|
||||
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [PostgreSQL](https://www.postgresql.org/).
|
||||
|
||||
## Install the datastore
|
||||
|
||||
On the admin cluster, install one of the alternative supported datastore:
|
||||
On the Management Cluster, install one of the alternative supported datastores:
|
||||
|
||||
- **MySQL**: install it with the command:
|
||||
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
# Backup and restore
|
||||
# Backup and Restore
|
||||
|
||||
As mentioned in the introduction, Kamaji “tenant clusters” are just regular pods scheduled on top of a choosn admin cluster; as such, you can take advantage of the same backup and restore methods that you would use to maintain the standard workload.
|
||||
As mentioned in the introduction, Tenant Control Planes are just regular pods scheduled in the Management Cluster. As such, you can take advantage of the same backup and restore methods that you would use to maintain the standard workload.
|
||||
|
||||
This guide will assist you in how to backup and restore TCP resources on the admin cluster using [Velero](https://tanzu.vmware.com/developer/guides/what-is-velero/).
|
||||
This guide will show you how to back up and restore TCP resources on the Management Cluster using [Velero](https://tanzu.vmware.com/developer/guides/what-is-velero/).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before proceeding with the next steps, we assume that the following prerequisites are met:
|
||||
|
||||
- Working admin cluster
|
||||
- Working Kamaji setup
|
||||
- Working datastore resource
|
||||
- Working TCP resource
|
||||
- Velero binary installed on the operator VM
|
||||
- Velero installed on the admin cluster
|
||||
- Velero installed on the Management Cluster
|
||||
- Configured BackupStorageLocation for Velero
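With the prerequisites in place, a minimal backup of the namespace hosting a Tenant Control Plane can be taken as sketched below; the namespace and backup names are placeholders:

```bash
# Back up every resource in the namespace hosting the Tenant Control Plane.
velero backup create tenant-00-backup --include-namespaces tenants

# Check the backup status.
velero backup describe tenant-00-backup
```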
|
||||
|
||||
## Backup step
|
||||
|
||||
143
docs/content/guides/certs-lifecycle.md
Normal file
@@ -0,0 +1,143 @@
|
||||
# Certificates Lifecycle
|
||||
|
||||
Kamaji is responsible for creating the required certificates, such as:
|
||||
|
||||
- the Kubernetes API Server certificate
|
||||
- the Kubernetes API Server kubelet client certificate
|
||||
- the Datastore certificate
|
||||
- the front proxy client certificate
|
||||
- the konnectivity certificate (if enabled)
|
||||
|
||||
Also, the following `kubeconfig` resources contain client certificates, which are created by Kamaji, such as:
|
||||
|
||||
- `admin`
|
||||
- `controller-manager`
|
||||
- `konnectivity` (if enabled)
|
||||
- `scheduler`
|
||||
|
||||
All the certificates are created with the `kubeadm` defaults, thus their validity is set to 1 year.
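If you want to verify the expiration of a given certificate, a quick check can be run against the Secret contents; the Secret name follows the examples below, while the `apiserver.crt` data key is an assumption and may differ in your setup:

```bash
# Decode the certificate stored in the Secret and print its expiry date.
# The data key (apiserver.crt) is an assumption: adjust it to the actual key.
kubectl get secret k8s-126-api-server-certificate \
  -o go-template='{{index .data "apiserver.crt"}}' | base64 -d \
  | openssl x509 -noout -enddate
```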
|
||||
|
||||
## How to rotate certificates
|
||||
|
||||
If you need to manually rotate one of these certificates, the required operation is the deletion of the given Secret.
|
||||
|
||||
```
|
||||
$: kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
k8s-126-admin-kubeconfig Opaque 1 12m
|
||||
k8s-126-api-server-certificate Opaque 2 12m
|
||||
k8s-126-api-server-kubelet-client-certificate Opaque 2 3h45m
|
||||
k8s-126-ca Opaque 4 3h45m
|
||||
k8s-126-controller-manager-kubeconfig Opaque 1 3h45m
|
||||
k8s-126-datastore-certificate Opaque 3 3h45m
|
||||
k8s-126-datastore-config Opaque 4 3h45m
|
||||
k8s-126-front-proxy-ca-certificate Opaque 2 3h45m
|
||||
k8s-126-front-proxy-client-certificate Opaque 2 3h45m
|
||||
k8s-126-konnectivity-certificate kubernetes.io/tls 2 3h45m
|
||||
k8s-126-konnectivity-kubeconfig Opaque 1 3h45m
|
||||
k8s-126-sa-certificate Opaque 2 3h45m
|
||||
k8s-126-scheduler-kubeconfig Opaque 1 3h45m
|
||||
```
|
||||
|
||||
Once this operation is performed, Kamaji will be notified of the missing certificate and will recreate it.
|
||||
|
||||
```
|
||||
$: kubectl delete secret -l kamaji.clastix.io/certificate_lifecycle_controller=x509
|
||||
secret "k8s-126-api-server-certificate" deleted
|
||||
secret "k8s-126-api-server-kubelet-client-certificate" deleted
|
||||
secret "k8s-126-front-proxy-client-certificate" deleted
|
||||
secret "k8s-126-konnectivity-certificate" deleted
|
||||
|
||||
$: kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
k8s-126-admin-kubeconfig Opaque 1 15m
|
||||
k8s-126-api-server-certificate Opaque 2 12s
|
||||
k8s-126-api-server-kubelet-client-certificate Opaque 2 12s
|
||||
k8s-126-ca Opaque 4 3h48m
|
||||
k8s-126-controller-manager-kubeconfig Opaque 1 3h48m
|
||||
k8s-126-datastore-certificate Opaque 3 3h48m
|
||||
k8s-126-datastore-config Opaque 4 3h48m
|
||||
k8s-126-front-proxy-ca-certificate Opaque 2 3h48m
|
||||
k8s-126-front-proxy-client-certificate Opaque 2 12s
|
||||
k8s-126-konnectivity-certificate kubernetes.io/tls 2 11s
|
||||
k8s-126-konnectivity-kubeconfig Opaque 1 3h48m
|
||||
k8s-126-sa-certificate Opaque 2 3h48m
|
||||
k8s-126-scheduler-kubeconfig Opaque 1 3h48m
|
||||
```
|
||||
|
||||
You can notice the Secrets have been automatically recreated, as well as a TenantControlPlane rollout with the updated certificates.
|
||||
|
||||
```
|
||||
$: kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
k8s-126-76768bdf89-82w8g 4/4 Running 0 58s
|
||||
k8s-126-76768bdf89-fwltl 4/4 Running 0 58s
|
||||
```
|
||||
|
||||
The same occurs with the `kubeconfig` ones.
|
||||
|
||||
```
|
||||
$: kubectl delete secret -l kamaji.clastix.io/certificate_lifecycle_controller=kubeconfig
|
||||
secret "k8s-126-admin-kubeconfig" deleted
|
||||
secret "k8s-126-controller-manager-kubeconfig" deleted
|
||||
secret "k8s-126-konnectivity-kubeconfig" deleted
|
||||
secret "k8s-126-scheduler-kubeconfig" deleted
|
||||
|
||||
$: kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
k8s-126-576c775b5d-2gr9h 4/4 Running 0 50s
|
||||
k8s-126-576c775b5d-jmvlm 4/4 Running 0 50s
|
||||
```
|
||||
|
||||
## Automatic certificates rotation
|
||||
|
||||
The Kamaji operator runs a controller which processes all the Secrets to determine their expiration, both for the `kubeconfig` resources and for the certificates.
|
||||
|
||||
The controller, named `CertificateLifecycle`, extracts the certificates from the _Secret_ objects and notifies the `TenantControlPlaneReconciler` controller, which starts a new certificate rotation.
|
||||
The rotation will occur the day before their expiration.
|
||||
|
||||
> Nota Bene:
|
||||
>
|
||||
> Kamaji is responsible for creating the `etcd` client certificate, and a new one will be generated automatically.
|
||||
> For other Datastore drivers, such as MySQL or PostgreSQL, the referenced Secret will always be deleted by the Controller to trigger the rotation:
|
||||
> the PKI management, since it's offloaded externally, must provide the renewed certificates.
|
||||
|
||||
## Certificate Authority rotation
|
||||
|
||||
Kamaji also takes care of your Tenant Clusters' Certificate Authority.
|
||||
|
||||
This can be rotated manually by deleting the following secret.
|
||||
|
||||
```
|
||||
$: kubectl delete secret k8s-126-ca
|
||||
secret "k8s-126-ca" deleted
|
||||
```
|
||||
|
||||
Once this occurs, the TenantControlPlane will enter the `CertificateAuthorityRotating` status.
|
||||
|
||||
```
|
||||
$: kubectl get tcp -w
|
||||
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 CertificateAuthorityRotating 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
k8s-126 v1.26.0 Ready 172.18.255.200:6443 k8s-126-admin-kubeconfig default 3h58m
|
||||
```
|
||||
|
||||
This operation is intended to be performed manually, since a new Certificate Authority requires the restart of all the components, as well as of the nodes:
|
||||
in such a case, you will need to distribute the new Certificate Authority and the new node certificates.
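For example, a copy of the rotated CA can be extracted from the Secret for distribution; the `ca.crt` data key is an assumption, so check the actual Secret contents in your setup:

```bash
# Extract the rotated Certificate Authority from the Tenant Control Plane Secret.
# The data key (ca.crt) is an assumption: adjust it to the actual key.
kubectl get secret k8s-126-ca \
  -o go-template='{{index .data "ca.crt"}}' | base64 -d > k8s-126-new-ca.crt
```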
|
||||
|
||||
Given the sensitivity of such an operation, the `Secret` controller does not check the _CA_, which has a validity of 10 years, as per the `kubeadm` defaults.
|
||||
6
docs/content/guides/cluster-api.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Cluster APIs Support
|
||||
|
||||
The [Cluster API](https://github.com/kubernetes-sigs/cluster-api) brings declarative, Kubernetes-style APIs to the creation, configuration, and management of Kubernetes clusters.
|
||||
|
||||
Kamaji offers seamless integration with the most popular Cluster API Infrastructure Providers. Check the currently supported providers and the roadmap on the related [repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
90
docs/content/guides/console.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Kamaji Console
|
||||
This guide will introduce you to the basics of the Kamaji Console, a web UI that helps you view and control your Kamaji setup.
|
||||
|
||||
## Install with Helm
|
||||
The Kamaji Console is a web interface running on the Kamaji Management Cluster that you can install with Helm. Check the Helm Chart [documentation](https://github.com/clastix/kamaji-console) for all the available settings.
|
||||
|
||||
The Kamaji Console requires a Secret in the Kamaji Management Cluster that contains the configuration and credentials to access the console from the browser. You can have the Helm Chart generate it for you, or create it yourself and provide the name of the Secret during installation. Before installing the Kamaji Console, access your workstation, replace the placeholders with actual values, and execute the following command:
|
||||
|
||||
```bash
|
||||
# The secret is required, otherwise the installation will fail
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
type: Opaque
|
||||
metadata:
|
||||
name: kamaji-console
|
||||
namespace: kamaji-system
|
||||
data:
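# NOTE: values under "data" must be base64-encoded; use "stringData" instead if you prefer to provide plain-text values.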
|
||||
# Credentials to login into console
|
||||
ADMIN_EMAIL: <email>
|
||||
ADMIN_PASSWORD: <password>
|
||||
# Secret used to sign the browser session
|
||||
JWT_SECRET: <jwtSecret>
|
||||
# URL where the console is accessible: https://<hostname>/ui
|
||||
NEXTAUTH_URL: <nextAuthUrl>
|
||||
EOF
|
||||
```
|
||||
|
||||
Install the Chart with the release name `console` in the `kamaji-system` namespace:
|
||||
|
||||
```
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm -n kamaji-system install console clastix/kamaji-console
|
||||
```
|
||||
|
||||
Show the status:
|
||||
|
||||
```
|
||||
helm status console -n kamaji-system
|
||||
```
|
||||
|
||||
## Access the Kamaji Console
|
||||
Once installed, forward the console service to the local machine:
|
||||
|
||||
```
|
||||
kubectl -n kamaji-system port-forward service/console-kamaji-console 8080:80
|
||||
Forwarding from 127.0.0.1:8080 -> 3000
|
||||
Forwarding from [::1]:8080 -> 3000
|
||||
```
|
||||
|
||||
and point the browser to `http://127.0.0.1:8080/ui` to access the console. Log in with the credentials you stored in the Secret.
|
||||
|
||||
!!! note "Expose with Ingress"
|
||||
The Kamaji Console can be exposed with an Ingress. Refer to the Helm Chart documentation on how to configure it properly.
|
||||
|
||||
## Explore the Kamaji Console
|
||||
The Kamaji Console provides a high level view of all Tenant Control Planes configured in your Kamaji setup. When you login to the console you are brought to the Tenant Control Planes view, which allows you to quickly understand the state of your Kamaji setup at a glance. It shows summary information about all the Tenant Control Plane objects, including: name, namespace, status, endpoint, version, and datastore.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create a new Tenant Control Plane from a basic placeholder in yaml format:
|
||||
|
||||

|
||||
|
||||
### Working with Tenant Control Plane
|
||||
From the main view, clicking on a Tenant Control Plane row will bring you to the detailed view. This view shows you all the details about the selected Tenant Control Plane, including all child components: pods, deployment, service, config maps, and secrets. From this view, you can also view, copy, and download the `kubeconfig` to access the Tenant Control Plane as tenant admin.
|
||||
|
||||

|
||||
|
||||
### Working with Datastore
|
||||
From the menu bar on the left, clicking on the Datastores item, you can access the list of provisioned Datastores. It shows a summary of the datastores, including the name and the driver in use, e.g. etcd, MySQL, or PostgreSQL.
|
||||
|
||||

|
||||
|
||||
From this view, you can also create, delete, edit, and inspect the single datastore.
|
||||
|
||||
### Additional Operations
|
||||
The Kamaji Console offers additional capabilities as part of the commercial edition, Clastix Operating Platform:
|
||||
|
||||
- Infrastructure Drivers Management
|
||||
- Applications Delivery via GitOps Operators
|
||||
- Centralized Authentication and Access Control
|
||||
- Auditing and Logging
|
||||
- Monitoring
|
||||
- Backup & Restore
|
||||
|
||||
!!! note "Ready for more?"
|
||||
To purchase an entitlement to Clastix Operating Platform, please contact hello@clastix.io.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Datastore Migration
|
||||
|
||||
On the admin cluster, you can deploy one or more multi-tenant datastores as `etcd`, `PostgreSQL`, and `MySQL` to save the state of the tenant clusters. A Tenant Control Plane can be migrated from a datastore to another one without service disruption or without complex and error prone backup & restore procedures.
|
||||
On the Management Cluster, you can deploy one or more multi-tenant datastores, such as `etcd`, `PostgreSQL`, and `MySQL`, to save the state of the Tenant Clusters. A Tenant Control Plane can be migrated from one datastore to another without service disruption and without complex, error-prone backup & restore procedures.
|
||||
|
||||
This guide will assist you in live-migrating a Tenant's data from one datastore to another backed by the same `etcd` driver.
|
||||
|
||||
@@ -169,3 +169,6 @@ admission webhook "catchall.migrate.kamaji.clastix.io" denied the request
|
||||
After a while, depending on the amount of data to migrate, the Tenant Control Plane is put back in full operating mode by the Kamaji controller.
|
||||
|
||||
> Please note that the datastore migration leaves the data on the default datastore, so you have to remove it manually.
|
||||
|
||||
## Post migration
|
||||
After migrating data to the new datastore, complete the migration procedure by restarting the `kubelet.service` on all the tenant worker nodes.
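A minimal sketch of that restart, assuming SSH access to the worker nodes with the same `USER` and host variables used in the getting started guide:

```bash
# Restart the kubelet on every tenant worker node after the datastore migration.
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for HOST in "${HOSTS[@]}"; do
  ssh ${USER}@${HOST} -t 'sudo systemctl restart kubelet'
done
```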
|
||||
@@ -13,7 +13,7 @@ The guide requires:
|
||||
## Summary
|
||||
|
||||
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
|
||||
* [Access Admin cluster](#access-admin-cluster)
|
||||
* [Access Management Cluster](#access-management-cluster)
|
||||
* [Install Cert Manager](#install-cert-manager)
|
||||
* [Install Kamaji controller](#install-kamaji-controller)
|
||||
* [Create Tenant Cluster](#create-tenant-cluster)
|
||||
@@ -43,8 +43,8 @@ az login
|
||||
```
|
||||
|
||||
|
||||
## Access Admin cluster
|
||||
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of all the Kamaji setup, including all Tenant clusters. For this guide, we're going to use an instance of Azure Kubernetes Service (AKS) as Admin Cluster.
|
||||
## Access Management Cluster
|
||||
In Kamaji, a Management Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The Management Cluster acts as a cockpit for all the Tenant Clusters and implements monitoring, logging, and governance of the whole Kamaji setup, including all Tenant Clusters. For this guide, we're going to use an instance of Azure Kubernetes Service (AKS) as the Management Cluster.
|
||||
|
||||
Throughout the following instructions, shell variables are used to indicate values that you should adjust to your own Azure environment:
|
||||
|
||||
@@ -144,6 +144,8 @@ kind: TenantControlPlane
|
||||
metadata:
|
||||
name: ${TENANT_NAME}
|
||||
namespace: ${TENANT_NAMESPACE}
|
||||
labels:
|
||||
tenant.clastix.io: ${TENANT_NAME}
|
||||
spec:
|
||||
dataStore: default
|
||||
controlPlane:
|
||||
@@ -272,13 +274,13 @@ NAME ENDPOINTS AGE
|
||||
kubernetes 10.240.0.100:6443 57m
|
||||
```
|
||||
|
||||
### Prepare worker nodes to join
|
||||
### Join worker nodes
|
||||
|
||||
Currently, Kamaji does not provide any helper for creation of tenant worker nodes.
|
||||
You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join to the tenant control plane with the `kubeadm`.
|
||||
The Tenant Control Plane is made of pods running in the Kamaji Management Cluster. At this point, the Tenant Cluster has no worker nodes. So, the next step is to join some worker nodes to the Tenant Control Plane.
|
||||
|
||||
Kamaji is sticking to the [Cluster Management API](https://github.com/kubernetes-sigs/cluster-api) project contracts by providing a `ControlPlane` provider.
|
||||
An Azure-based cluster is not yet available: the available road-map is available on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
Kamaji does not provide any helper for the creation of tenant worker nodes; instead, it leverages the [Cluster API](https://github.com/kubernetes-sigs/cluster-api). This allows you to create the Tenant Clusters, including worker nodes, in a completely declarative way. Currently, a Cluster API `ControlPlane` provider for Azure is not yet available: check the roadmap on the [official repository](https://github.com/clastix/cluster-api-control-plane-provider-kamaji).
|
||||
|
||||
An alternative approach to create and join worker nodes in Azure is to manually create the VMs, turn them into Kubernetes worker nodes, and then join them through the `kubeadm` command.
|
||||
|
||||
Create an Azure Virtual Machine Scale Set to host the worker nodes:
|
||||
|
||||
@@ -296,7 +298,6 @@ az vmss create \
|
||||
--vnet-name $KAMAJI_VNET_NAME \
|
||||
--subnet $TENANT_SUBNET_NAME \
|
||||
--computer-name-prefix $TENANT_NAME- \
|
||||
--custom-data ./tenant-cloudinit.yaml \
|
||||
--load-balancer "" \
|
||||
--instance-count 0
|
||||
|
||||
@@ -311,15 +312,20 @@ az vmss scale \
|
||||
--new-capacity 3
|
||||
```
|
||||
|
||||
### Join worker nodes
|
||||
The current approach for joining nodes is to use `kubeadm` and therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire command of joining in a variable:
|
||||
Once all the machines are ready, follow the related [documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) in order to:
|
||||
|
||||
- install `containerd` as container runtime
|
||||
- install `crictl`, the command line for working with `containerd`
|
||||
- install `kubectl`, `kubelet`, and `kubeadm` in the desired version
|
||||
|
||||
After the installation is complete on all the nodes, store the entire join command in a variable:
|
||||
|
||||
```bash
|
||||
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r ."spec.loadBalancerIP")
|
||||
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
|
||||
```
|
||||
|
||||
A bash loop will be used to join all the available nodes.
|
||||
Use a loop to log in to each node and run the join command:
|
||||
|
||||
```bash
|
||||
VMIDS=($(az vmss list-instances \
|
||||
@@ -364,7 +370,7 @@ As per [documentation](https://projectcalico.docs.tigera.io/reference/public-clo
|
||||
- `CALICO_IPV4POOL_IPIP="Never"`
|
||||
- `CALICO_IPV4POOL_VXLAN="Always"`
|
||||
|
||||
Apply to the tenant cluster:
|
||||
Apply to the Tenant Cluster:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
# Manage tenant resources GitOps-way from the admin cluster
|
||||
# Manage Tenant Control Planes with GitOps
|
||||
|
||||
This guide describes a declarative way to deploy Kubernetes add-ons across multiple Tenant Clusters, the GitOps way. An admin may need to apply a specific workload into Tenant Clusters and ensure it is constantly reconciled, no matter what the tenants do in their clusters. Examples include installing monitoring agents, enforcing specific policies, installing infrastructure operators like Cert Manager, and so on.
|
||||
|
||||
This way the tenant resources can be ensured from a single pane of glass, from the *admin cluster*.
|
||||
This way, the tenant resources can be managed from a single pane of glass: the *Management Cluster*.
|
||||
|
||||
## Flux as the GitOps operator
|
||||
|
||||
As GitOps ensures a constant reconciliation to a Git-versioned desired state, [Flux](https://fluxcd.io) can satisfy the requirements of those scenarios. In particular, the controllers that reconcile [resources](https://fluxcd.io/flux/concepts/#reconciliation) support communicating with external clusters.
|
||||
|
||||
In this scenario the Flux toolkit would run in the *admin cluster*, with reconcile controllers reconciling resources into *tenant clusters*.
|
||||
In this scenario the Flux toolkit would run in the *Management Cluster*, with reconcile controllers reconciling resources into *Tenant Clusters*.
|
||||
|
||||

|
||||
|
||||
@@ -29,7 +29,7 @@ tenant1 v1.25.1 Ready 172.18.0.2:31443 tenant1-admin-kubeconfig
|
||||
|
||||
> As the *admin* user has the *cluster-admin* `ClusterRole`, it will have the necessary privileges to operate on Custom Resources too.
|
||||
|
||||
Given that Flux it's installed in the *admin cluster* - guide [here](https://fluxcd.io/flux/installation/) - resources can be ensured for specifics tenant clusters, by filling the `spec.kubeConfig` field of the Flux reconciliation resource.
|
||||
Given that Flux is installed in the *Management Cluster* - guide [here](https://fluxcd.io/flux/installation/) - resources can be ensured for specific Tenant Clusters by filling the `spec.kubeConfig` field of the Flux reconciliation resource.
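As a minimal sketch of that field, a Flux `Kustomization` targeting a Tenant Cluster could look like the following; the resource names, Git source, and the kubeconfig Secret key are illustrative assumptions, not taken from this guide:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: tenant1-addons
  namespace: tenants
spec:
  interval: 10m
  sourceRef:
    kind: GitRepository
    name: addons-repo
  path: ./addons
  prune: true
  # Reconcile into the Tenant Cluster instead of the Management Cluster.
  kubeConfig:
    secretRef:
      name: tenant1-admin-kubeconfig
      key: admin.conf
EOF
```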
|
||||
|
||||
For example, it might be needed to ensure [cert-manager](https://cert-manager.io/) is installed into a *tenant1* cluster with Helm. It can be done by declaring a `HelmRelease` as follows:
|
||||
|
||||
@@ -69,7 +69,7 @@ spec:
|
||||
replicaCount: 2
|
||||
```
|
||||
|
||||
and applying it in the *admin cluster*, alongside the related *jetstack* `HelmRepository`, in the *tenants* `Namespace`.
|
||||
and applying it in the *Management Cluster*, alongside the related *jetstack* `HelmRepository`, in the *tenants* `Namespace`.
|
||||
|
||||
The result would be having Cert Manager installed in the *default* `Namespace` of the tenant *tenant1*'s cluster:
|
||||
|
||||
@@ -82,7 +82,7 @@ tenant1-cert-manager-cainjector 1/1 1 1 4m3s
|
||||
tenant1-cert-manager-webhook 1/1 1 1 4m3s
|
||||
```
|
||||
|
||||
No matter what the tenant users will do on the *tenant cluster*, the Flux reconciliation controllers wirunning in the *admin cluster* will ensure the desired state declared by the reconciliation resources applied existing in the *admin cluster*, will be reconciled in the *tenant cluster*.
|
||||
No matter what the tenant users do on the *Tenant Cluster*, the Flux reconciliation controllers running in the *Management Cluster* will ensure that the desired state, declared by the reconciliation resources existing in the *Management Cluster*, is reconciled in the *Tenant Cluster*.
|
||||
|
||||
Furthermore, this approach does not need to have in each tenant cluster nor Flux neither applied the related reconciliation Custom Resorces.
|
||||
Furthermore, this approach does not require installing Flux, nor applying the related reconciliation Custom Resources, in each Tenant Cluster.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Tenant Cluster Upgrade
|
||||
The process of upgrading a _“tenant cluster”_ consists in two steps:
|
||||
The process of upgrading a _“Tenant Cluster”_ consists of two steps:
|
||||
|
||||
1. Upgrade the Tenant Control Plane
|
||||
2. Upgrade the Tenant Worker Nodes
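The first step boils down to updating the declared Kubernetes version on the `TenantControlPlane`, as sketched below; the resource name and target version are placeholders, and `spec.kubernetes.version` is assumed to be the relevant field:

```bash
# Bump the Kubernetes version of the Tenant Control Plane; Kamaji rolls out
# the new control plane pods accordingly.
kubectl patch tcp tenant-00 --type=merge \
  -p '{"spec":{"kubernetes":{"version":"v1.26.0"}}}'
```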
|
||||
@@ -14,6 +14,8 @@ apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: TenantControlPlane
|
||||
metadata:
|
||||
name: tenant-00
|
||||
labels:
|
||||
tenant.clastix.io: tenant-00
|
||||
spec:
|
||||
controlPlane:
|
||||
deployment:
|
||||
|
||||
|
Before Width: | Height: | Size: 152 KiB After Width: | Height: | Size: 163 KiB |
BIN
docs/content/images/console-ds-list.png
Normal file
|
After Width: | Height: | Size: 150 KiB |
BIN
docs/content/images/console-tcp-create.png
Normal file
|
After Width: | Height: | Size: 207 KiB |
BIN
docs/content/images/console-tcp-list.png
Normal file
|
After Width: | Height: | Size: 249 KiB |
BIN
docs/content/images/console-tcp-view.png
Normal file
|
After Width: | Height: | Size: 304 KiB |
@@ -1,20 +1,48 @@
|
||||
# Kamaji
|
||||
|
||||
**Kamaji** deploys and operates Kubernetes at scale with a fraction of the operational burden.
|
||||
**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden.
|
||||
|
||||
## How it works
|
||||
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
|
||||
Kamaji turns any Kubernetes cluster into a _“Management Cluster”_ to orchestrate other Kubernetes clusters called _“Tenant Clusters”_. Kamaji is special because the Control Plane components are running inside pods instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
|
||||
|
||||
<img src="images/architecture.png" width="600">
|
||||
|
||||
View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
|
||||
|
||||
!!! info "CNCF Compliance"
|
||||
All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
|
||||
All the Tenant Clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
|
||||
|
||||
## Getting started
|
||||
|
||||
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji.
|
||||
|
||||
|
||||
## FAQs
|
||||
Q. What does Kamaji mean?
|
||||
|
||||
A. Kamaji is named after the character _Kamajī_ (釜爺, lit. "Boiler Geezer") from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away). Kamajī is an elderly man with six long arms who operates the boiler room of the Bathhouse: the silent professional whom no one sees, but who gets the hot, fragrant water to all the guests, just like our Kamaji provides Kubernetes as a service!
|
||||
|
||||
Q. Is Kamaji yet another Kubernetes distribution?
|
||||
|
||||
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
|
||||
|
||||
Q. How is Kamaji different from typical multi-cluster management solutions?
|
||||
|
||||
A. Most of the existing multi-cluster management solutions provision specific infrastructure for the control plane, in most cases dedicated machines. Kamaji is special because the control planes of the downstream clusters are regular pods running in the management cluster. This solution makes running control planes at scale cheaper and easier to deploy and operate.
|
||||
|
||||
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
|
||||
|
||||
A. Yes, the tenant control plane components are packaged in the same way they are run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own servers. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used: no forks!
|
||||
|
||||
Q. How is Kamaji different from managed Kubernetes services offered by Public Clouds?
|
||||
|
||||
A. Kamaji gives you full control over all your Kubernetes infrastructures, offering unparalleled consistency across disparate environments: cloud, data-center, and edge while simplifying and centralizing operations, maintenance, and management tasks. Unlike other Managed Kubernetes services, Kamaji allows you to connect worker nodes from any infrastructure, providing you greater freedom, flexibility, and consistency than public Managed Kubernetes services.
|
||||
|
||||
Q. How does Kamaji differ from Cluster API?
|
||||
|
||||
A. Kamaji and Cluster API complement each other. Kamaji's core idea is more efficient control plane management, while Cluster API provides a declarative approach to cluster bootstrap and lifecycle management across different environments, cloud providers, and on-premises infrastructures. Combined, you get the best of both: Kamaji simplifies Control Plane management, and Cluster API abstracts away the infrastructure. See the [CAPI providers](guides/cluster-api.md) supported by Kamaji.
|
||||
|
||||
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
|
||||
|
||||
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants while keeping tenant namespaces isolated by policies. While that solution is often the right choice, balancing features and ease of use, there are cases where a tenant user requires access to the control plane, for example when a tenant needs to manage CRDs on their own. With Kamaji, you can grant full cluster admin permissions to the tenant.
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ Kamaji has been designed to operate a large scale of Kubernetes Tenant Control P
|
||||
In the Operator jargon, a manager is created to start several controllers, each with its own responsibility.
|
||||
When a manager is started, all the underlying controllers are started, along with other "runnable" resources, like the webhook server.
|
||||
|
||||
Kamaji operates several reconciliation operations, both in the admin and tenant clusters.
|
||||
Kamaji operates several reconciliation operations, both in the admin and Tenant Clusters.
|
||||
With that said, a main manager is responsible for reconciling the admin resources (Deployment, Secret, ConfigMap, etc.); for each Tenant Control Plane, a new manager is spun up as a controller of the main manager.
|
||||
These Tenant Control Plane managers, named soot managers in the code base, in turn start and run controllers to ensure the desired state of the underlying add-ons and of required resources, such as the kubeadm ones.
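To make the manager and controller relationship concrete, here is a minimal Go sketch of the same pattern built with controller-runtime; it is illustrative only and not Kamaji's actual wiring:

```
// Illustrative sketch: a controller-runtime manager is created, controllers
// are registered on it, and starting the manager starts every registered
// controller and runnable (including the webhook server mentioned above).
package sketch

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

func run(r reconcile.Reconciler) error {
	// Scheme registration is omitted for brevity.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		return err
	}

	// Register a controller on the manager: it watches TenantControlPlane
	// resources and calls the reconciler on every change. The soot managers
	// follow the same pattern, scoped to a single Tenant Control Plane.
	if err := ctrl.NewControllerManagedBy(mgr).
		For(&kamajiv1alpha1.TenantControlPlane{}).
		Complete(r); err != nil {
		return err
	}

	// Start blocks and runs all registered controllers until shutdown.
	return mgr.Start(ctrl.SetupSignalHandler())
}
```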
|
||||
|
||||
@@ -25,7 +25,7 @@ Your mileage may vary and just want to share with the community how it has been
|
||||
|
||||
## Infrastructure
|
||||
|
||||
The benchmark has been run on a Kubernetes cluster backed by Elastic Kubernetes Service, used as the Admin cluster.
|
||||
The benchmark has been run on a Kubernetes cluster backed by Elastic Kubernetes Service, used as the Management Cluster.
|
||||
|
||||
Two node pools have been created to avoid the noisy neighbour effect and to increase performance:
|
||||
|
||||
@@ -191,6 +191,8 @@ kind: TenantControlPlane
|
||||
metadata:
|
||||
name: benchmark$I
|
||||
namespace: $NS
|
||||
labels:
|
||||
tenant.clastix.io: benchmark$I
|
||||
spec:
|
||||
dataStore: $DS
|
||||
controlPlane:
|
||||
@@ -236,4 +238,4 @@ If you're encountering different results, please, engage with the community to s
|
||||
|
||||
# Running a thousand Tenant Control Planes using multiple DataStores
|
||||
|
||||
The next benchmark must address the use case where a Kamaji admin cluster manages up to a thousand Tenant Control Plane instances.
|
||||
The next benchmark must address the use case where a Kamaji Management Cluster manages up to a thousand Tenant Control Plane instances.
|
||||
|
||||
@@ -4,22 +4,24 @@ Currently, **Kamaji** allows customization using CLI flags for the `manager` sub
|
||||
|
||||
Available flags are the following:
|
||||
|
||||
| Flag | Usage | Default |
|
||||
| ---- | ------ | --- |
|
||||
| `--metrics-bind-address` | The address the metric endpoint binds to. | `:8080` |
|
||||
| `--health-probe-bind-address` | The address the probe endpoint binds to. | `:8081` |
|
||||
| `--leader-elect` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. | `true` |
|
||||
| `--tmp-directory` | Directory which will be used to work with temporary files. | `/tmp/kamaji` |
|
||||
| `--kine-image` | Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies). | `rancher/kine:v0.9.2-amd64` |
|
||||
| `--datastore` | The default DataStore that should be used by Kamaji to setup the required storage. | `etcd` |
|
||||
| `--migrate-image` | Specify the container image to launch when a TenantControlPlane is migrated to a new datastore. | `migrate-image` |
|
||||
| `--max-concurrent-tcp-reconciles` | Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption). | `1` |
|
||||
| `--pod-namespace` | The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs. | `os.Getenv("POD_NAMESPACE")` |
|
||||
| `--webhook-service-name` | The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs. | `kamaji-webhook-service` |
|
||||
| `--serviceaccount-name` | The Kubernetes ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs. | `os.Getenv("SERVICE_ACCOUNT")` |
|
||||
| `--webhook-ca-path` | Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs. | `/tmp/k8s-webhook-server/serving-certs/ca.crt` |
|
||||
| `--zap-devel` | Development Mode (encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode (encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error). | `true` |
|
||||
| `--zap-encoder` | Zap log encoding, one of 'json' or 'console' | `console` |
|
||||
| `--zap-log-level` | Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity | `info` |
|
||||
| `--zap-stacktrace-level` | Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). | `info` |
|
||||
| `--zap-time-encoding` | Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') | `epoch` |
|
||||
| Flag | Usage | Default |
|
||||
|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------|
|
||||
| `--metrics-bind-address` | The address the metric endpoint binds to. | `:8080` |
|
||||
| `--health-probe-bind-address` | The address the probe endpoint binds to. | `:8081` |
|
||||
| `--leader-elect` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. | `true` |
|
||||
| `--tmp-directory` | Directory which will be used to work with temporary files. | `/tmp/kamaji` |
|
||||
| `--kine-image` | Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies). | `rancher/kine:v0.9.2-amd64` |
|
||||
| `--datastore`                     | The default DataStore that should be used by Kamaji to set up the required storage.                                                                                                   | `etcd`                                         |
|
||||
| `--migrate-image` | Specify the container image to launch when a TenantControlPlane is migrated to a new datastore. | `migrate-image` |
|
||||
| `--max-concurrent-tcp-reconciles` | Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption). | `1` |
|
||||
| `--pod-namespace`                 | The Kubernetes Namespace in which the Operator is running, required for the TenantControlPlane migration jobs.                                                                        | `os.Getenv("POD_NAMESPACE")`                   |
|
||||
| `--webhook-service-name` | The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs. | `kamaji-webhook-service` |
|
||||
| `--serviceaccount-name` | The Kubernetes ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs. | `os.Getenv("SERVICE_ACCOUNT")` |
|
||||
| `--webhook-ca-path` | Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs. | `/tmp/k8s-webhook-server/serving-certs/ca.crt` |
|
||||
| `--controller-reconcile-timeout`  | The reconciliation request timeout before the controller withdraws the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.         | `30s`                                          |
|
||||
| `--cache-resync-period` | The controller-runtime.Manager cache resync period. | `10h` |
|
||||
| `--zap-devel` | Development Mode (encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode (encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error). | `true` |
|
||||
| `--zap-encoder` | Zap log encoding, one of 'json' or 'console' | `console` |
|
||||
| `--zap-log-level` | Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity | `info` |
|
||||
| `--zap-stacktrace-level` | Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). | `info` |
|
||||
| `--zap-time-encoding` | Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') | `epoch` |
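The two duration-valued flags added above follow the usual Go flag conventions; a hedged sketch of how such flags are typically declared with `spf13/pflag` (illustrative only, not the actual Kamaji source):

```
// Illustrative only: how duration-valued manager flags such as the two new
// entries above are typically declared with spf13/pflag.
package sketch

import (
	"time"

	"github.com/spf13/pflag"
)

func bindFlags(fs *pflag.FlagSet) (reconcileTimeout, cacheResync *time.Duration) {
	reconcileTimeout = fs.Duration("controller-reconcile-timeout", 30*time.Second,
		"The reconciliation request timeout before the controller withdraws external resource calls.")
	cacheResync = fs.Duration("cache-resync-period", 10*time.Hour,
		"The controller-runtime.Manager cache resync period.")

	return reconcileTimeout, cacheResync
}
```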
|
||||
|
||||
@@ -1,14 +1,17 @@
|
||||
# Conformance
|
||||
# CNCF Conformance
|
||||
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation - CNCF - provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).
|
||||
|
||||
The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the
|
||||
[kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.
|
||||
|
||||
All the _“tenant clusters”_ built with Kamaji are CNCF conformant:
|
||||
All the _“Tenant Clusters”_ built with Kamaji are CNCF conformant:
|
||||
|
||||
- [v1.23](https://github.com/cncf/k8s-conformance/pull/2194)
|
||||
- [v1.24](https://github.com/cncf/k8s-conformance/pull/2193)
|
||||
- [v1.25](https://github.com/cncf/k8s-conformance/pull/2188)
|
||||
- [v1.26](https://github.com/cncf/k8s-conformance/pull/2787)
|
||||
- [v1.27](https://github.com/cncf/k8s-conformance/pull/2786)
|
||||
- [v1.28](https://github.com/cncf/k8s-conformance/pull/2785)
|
||||
|
||||
<p align="left" style="padding: 6px 6px">
|
||||
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="100" />
|
||||
@@ -21,13 +24,13 @@ regularly built and kept up to date to execute against all currently supported v
|
||||
|
||||
Download a [binary release](https://github.com/vmware-tanzu/sonobuoy/releases) of the CLI.
|
||||
|
||||
Make sure to access your tenant cluster:
|
||||
Make sure to access your Tenant Cluster:
|
||||
|
||||
```
|
||||
export KUBECONFIG=tenant.kubeconfig
|
||||
```
|
||||
|
||||
Deploy a Sonobuoy pod to your tenant cluster with:
|
||||
Deploy a Sonobuoy pod to your Tenant Cluster with:
|
||||
|
||||
```
|
||||
sonobuoy run --mode=certified-conformance
|
||||
|
||||
@@ -2,11 +2,16 @@
|
||||
|
||||
In Kamaji, there are different components that might require independent versioning and support level:
|
||||
|
||||
| Kamaji | Admin Cluster | Tenant Cluster |
|
||||
|--------|---------------|----------------------|
|
||||
| v0.0 | v1.22+ | [v1.21.0 .. v1.23.5] |
|
||||
| v0.1 | v1.22+ | [v1.21.0 .. v1.25.0] |
|
||||
| v0.2 | v1.22+ | [v1.21.0 .. v1.27.0] |
|
||||
| v0.3.0 | v1.22+ | [v1.21.0 .. v1.27.0] |
|
||||
| v0.3.1 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.2 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| Kamaji | Management Cluster | Tenant Cluster |
|
||||
|--------|--------------------|----------------------|
|
||||
| v0.0 | v1.22+ | [v1.21.0 .. v1.23.5] |
|
||||
| v0.1 | v1.22+ | [v1.21.0 .. v1.25.0] |
|
||||
| v0.2 | v1.22+ | [v1.21.0 .. v1.27.0] |
|
||||
| v0.3.0 | v1.22+ | [v1.21.0 .. v1.27.0] |
|
||||
| v0.3.1 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.2 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.3 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.4 | v1.22+ | [v1.21.0 .. v1.28.1] |
|
||||
| v0.3.5 | v1.22+ | [v1.21.0 .. v1.28.1] |
|
||||
|
||||
| v0.4.0 | v1.22+ | [v1.21.0 .. v1.29.0] |
|
||||
|
||||
@@ -65,6 +65,9 @@ nav:
|
||||
- guides/upgrade.md
|
||||
- guides/datastore-migration.md
|
||||
- guides/backup-and-restore.md
|
||||
- guides/certs-lifecycle.md
|
||||
- guides/cluster-api.md
|
||||
- guides/console.md
|
||||
- 'Use Cases': use-cases.md
|
||||
- 'Reference':
|
||||
- reference/index.md
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
@@ -44,7 +44,7 @@ var _ = BeforeSuite(func() {
|
||||
|
||||
By("bootstrapping test environment")
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: pointer.Bool(true),
|
||||
UseExistingCluster: pointer.To(true),
|
||||
}
|
||||
|
||||
var err error
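Several of the following hunks replace the deprecated typed helpers from `k8s.io/utils/pointer` with the generic `k8s.io/utils/ptr` package (aliased as `pointer` in the tests); a brief sketch of the equivalence:

```
// Equivalence between the old typed pointer helpers and the generic ptr
// package adopted throughout this changeset.
package sketch

import (
	"k8s.io/utils/pointer" // deprecated typed helpers
	"k8s.io/utils/ptr"     // generic replacement
)

func replicas() (*int32, *int32) {
	before := pointer.Int32(1) // one helper per type: Int32, Bool, String, ...
	after := ptr.To(int32(1))  // single generic helper, works for any type

	return before, after
}
```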
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -26,7 +27,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional resources
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
AdditionalInitContainers: []corev1.Container{{
|
||||
Name: initContainerName,
|
||||
Image: initContainerImage,
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
@@ -42,7 +43,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional options",
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
AdditionalInitContainers: []corev1.Container{{
|
||||
Name: initContainerName,
|
||||
Image: initContainerImage,
|
||||
@@ -255,7 +256,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional options",
|
||||
}, &deploy)).NotTo(HaveOccurred())
|
||||
|
||||
return deploy.Spec.Template.Spec.InitContainers
|
||||
}, 10*time.Second, time.Second).Should(HaveLen(0), "Deployment should not contain anymore the init container")
|
||||
}, 10*time.Second, time.Second).Should(BeEmpty(), "Deployment should not contain anymore the init container")
|
||||
|
||||
Eventually(func() bool {
|
||||
deploy := appsv1.Deployment{}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
@@ -35,7 +36,7 @@ var _ = Describe("When migrating a Tenant Control Plane to another datastore", f
|
||||
DataStore: "etcd-bronze",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "NodePort",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -24,7 +25,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with the MySQL driver", f
|
||||
DataStore: "mysql-bronze",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -24,7 +25,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with the PostgreSQL drive
|
||||
DataStore: "postgresql-bronze",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -23,7 +24,7 @@ var _ = Describe("Deploy a TenantControlPlane resource", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -26,7 +27,7 @@ var _ = Describe("Deploy a TenantControlPlane with wrong preferred kubelet addre
|
||||
DataStore: "default",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
@@ -62,7 +63,7 @@ var _ = Describe("Deploy a TenantControlPlane with wrong preferred kubelet addre
|
||||
DataStore: "default",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -25,7 +26,7 @@ var _ = Describe("downgrade of a TenantControlPlane Kubernetes version", func()
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -25,7 +26,7 @@ var _ = Describe("non-linear minor upgrade of a TenantControlPlane Kubernetes ve
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/upgrade"
|
||||
@@ -35,7 +36,7 @@ var _ = Describe("using an unsupported TenantControlPlane Kubernetes version", f
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
@@ -63,7 +64,7 @@ var _ = Describe("using an unsupported TenantControlPlane Kubernetes version", f
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -19,8 +19,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -43,7 +44,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "NodePort",
|
||||
@@ -120,7 +121,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
var joinCommandBuffer *bytes.Buffer
|
||||
|
||||
By("generating kubeadm join command", func() {
|
||||
joinCommandBuffer = bytes.NewBuffer([]byte(""))
|
||||
joinCommandBuffer = bytes.NewBufferString("")
|
||||
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile.Name())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -128,7 +129,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(cmd.RunCreateToken(joinCommandBuffer, clientset, "", util.DefaultInitConfiguration(), true, "", kubeconfigFile.Name())).ToNot(HaveOccurred())
|
||||
Expect(cmd.RunCreateToken(joinCommandBuffer, clientset, "", &kubeadmv1beta3.InitConfiguration{}, true, "", kubeconfigFile.Name())).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
By("executing the command in the worker node", func() {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -38,7 +39,7 @@ var _ = Describe("validating kubeconfig", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: 1,
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "NodePort",
|
||||
|
||||
219
go.mod
@@ -1,102 +1,96 @@
|
||||
module github.com/clastix/kamaji
|
||||
|
||||
go 1.18
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/JamesStewy/go-mysqldump v0.2.2
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/go-logr/logr v1.2.3
|
||||
github.com/go-logr/logr v1.3.0
|
||||
github.com/go-pg/pg/v10 v10.10.6
|
||||
github.com/go-sql-driver/mysql v1.6.0
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/juju/mutex/v2 v2.0.0
|
||||
github.com/onsi/ginkgo/v2 v2.6.0
|
||||
github.com/onsi/gomega v1.24.1
|
||||
github.com/onsi/ginkgo/v2 v2.13.0
|
||||
github.com/onsi/gomega v1.29.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.10.1
|
||||
github.com/testcontainers/testcontainers-go v0.13.0
|
||||
go.etcd.io/etcd/api/v3 v3.5.6
|
||||
go.etcd.io/etcd/client/v3 v3.5.6
|
||||
go.etcd.io/etcd/api/v3 v3.5.10
|
||||
go.etcd.io/etcd/client/v3 v3.5.10
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||
k8s.io/api v0.26.1
|
||||
k8s.io/apimachinery v0.26.1
|
||||
k8s.io/apiserver v0.26.1
|
||||
k8s.io/client-go v0.26.1
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0
|
||||
k8s.io/api v0.29.0
|
||||
k8s.io/apimachinery v0.29.0
|
||||
k8s.io/apiserver v0.29.0
|
||||
k8s.io/client-go v0.29.0
|
||||
k8s.io/cluster-bootstrap v0.0.0
|
||||
k8s.io/klog/v2 v2.80.1
|
||||
k8s.io/klog/v2 v2.110.1
|
||||
k8s.io/kubelet v0.0.0
|
||||
k8s.io/kubernetes v1.26.1
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
|
||||
sigs.k8s.io/controller-runtime v0.14.0
|
||||
k8s.io/kubernetes v1.29.0
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.16.3
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/storage v1.18.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.4.17 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.23 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.25 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 // indirect
|
||||
github.com/containerd/cgroups v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/containerd/containerd v1.5.9 // indirect
|
||||
github.com/coredns/caddy v1.1.0 // indirect
|
||||
github.com/coredns/corefile-migration v1.0.17 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/coredns/caddy v1.1.1 // indirect
|
||||
github.com/coredns/corefile-migration v1.0.21 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.11+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-logr/zapr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.4 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-pg/zerochecker v0.2.0 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lithammer/dedent v1.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.5 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/moby/sys/mount v0.2.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
@@ -104,16 +98,16 @@ require (
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/opencontainers/runc v1.1.10 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/afero v1.7.0 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
@@ -122,72 +116,77 @@ require (
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect
|
||||
github.com/vmihailenco/tagparser v0.1.2 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.25.0 // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.10.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/term v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/api v0.63.0 // indirect
|
||||
golang.org/x/tools v0.12.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
|
||||
google.golang.org/grpc v1.49.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.66.2 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.26.1 // indirect
|
||||
k8s.io/cli-runtime v0.26.1 // indirect
|
||||
k8s.io/component-base v0.26.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.29.0 // indirect
|
||||
k8s.io/cli-runtime v0.29.0 // indirect
|
||||
k8s.io/component-base v0.29.0 // indirect
|
||||
k8s.io/component-helpers v0.0.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
|
||||
k8s.io/kube-proxy v0.0.0 // indirect
|
||||
k8s.io/system-validators v1.8.0 // indirect
|
||||
mellium.im/sasl v0.3.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.12.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
k8s.io/api => k8s.io/api v0.26.1
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.26.1
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.26.1
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1
|
||||
k8s.io/client-go => k8s.io/client-go v0.26.1
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.26.1
|
||||
k8s.io/component-base => k8s.io/component-base v0.26.1
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.26.1
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.26.1
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.26.1
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.1
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.26.1
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.26.1
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1
|
||||
k8s.io/metrics => k8s.io/metrics v0.26.1
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.26.1
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1
|
||||
k8s.io/api => k8s.io/api v0.29.0
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.29.0
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.29.0
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0
|
||||
k8s.io/client-go => k8s.io/client-go v0.29.0
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.29.0
|
||||
k8s.io/component-base => k8s.io/component-base v0.29.0
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.29.0
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.29.0
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.29.0
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0
|
||||
k8s.io/endpointslice => k8s.io/endpointslice v0.29.0
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.29.0
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.29.0
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0
|
||||
k8s.io/metrics => k8s.io/metrics v0.29.0
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.29.0
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0
|
||||
)
|
||||
|
||||
replace github.com/JamesStewy/go-mysqldump => github.com/vtoma/go-mysqldump v1.0.0
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
@@ -224,7 +224,7 @@ func (d Deployment) buildPKIVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.T
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Projected: &corev1.ProjectedVolumeSource{
|
||||
Sources: sources,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -240,7 +240,7 @@ func (d Deployment) buildCAVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.Te
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -256,7 +256,7 @@ func (d Deployment) buildSSLCertsVolume(podSpec *corev1.PodSpec, tcp kamajiv1alp
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func (d Deployment) buildShareCAVolume(podSpec *corev1.PodSpec, tcp kamajiv1alph
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -288,7 +288,7 @@ func (d Deployment) buildLocalShareCAVolume(podSpec *corev1.PodSpec, tcp kamajiv
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -304,7 +304,7 @@ func (d Deployment) buildSchedulerVolume(podSpec *corev1.PodSpec, tcp kamajiv1al
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.KubeConfig.Scheduler.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -320,7 +320,7 @@ func (d Deployment) buildControllerManagerVolume(podSpec *corev1.PodSpec, tcp ka
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.KubeConfig.ControllerManager.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -776,7 +776,7 @@ func (d Deployment) buildKineVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Storage.Certificate.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
// Adding the volume to read Kine certificates:
|
||||
@@ -928,12 +928,12 @@ func (d Deployment) setSelector(deploymentSpec *appsv1.DeploymentSpec, tcp kamaj
|
||||
}
|
||||
|
||||
func (d Deployment) setReplicas(deploymentSpec *appsv1.DeploymentSpec, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
deploymentSpec.Replicas = pointer.Int32(tcp.Spec.ControlPlane.Deployment.Replicas)
|
||||
deploymentSpec.Replicas = tcp.Spec.ControlPlane.Deployment.Replicas
|
||||
}
|
||||
|
||||
func (d Deployment) setRuntimeClass(spec *corev1.PodSpec, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
if len(tcp.Spec.ControlPlane.Deployment.RuntimeClassName) > 0 {
|
||||
spec.RuntimeClassName = pointer.String(tcp.Spec.ControlPlane.Deployment.RuntimeClassName)
|
||||
spec.RuntimeClassName = pointer.To(tcp.Spec.ControlPlane.Deployment.RuntimeClassName)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
@@ -186,7 +186,7 @@ func (k Konnectivity) buildVolumeMounts(podSpec *corev1.PodSpec) {
|
||||
|
||||
podSpec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
|
||||
|
||||
vFound, vIndex := false, 0
|
||||
vFound, vIndex := false, 0 //nolint:wastedassign
|
||||
// Patching the volume mounts
|
||||
if vFound, vIndex = utilities.HasNamedVolumeMount(podSpec.Containers[index].VolumeMounts, konnectivityUDSVolume); !vFound {
|
||||
vIndex = len(podSpec.Containers[index].VolumeMounts)
|
||||
@@ -208,9 +208,8 @@ func (k Konnectivity) buildVolumeMounts(podSpec *corev1.PodSpec) {
|
||||
}
|
||||
|
||||
func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, podSpec *corev1.PodSpec) {
|
||||
found, index := false, 0
|
||||
// Defining volumes for the UDS socket
|
||||
found, index = utilities.HasNamedVolume(podSpec.Volumes, konnectivityUDSVolume)
|
||||
found, index := utilities.HasNamedVolume(podSpec.Volumes, konnectivityUDSVolume)
|
||||
if !found {
|
||||
index = len(podSpec.Volumes)
|
||||
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
|
||||
@@ -235,7 +234,7 @@ func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, pod
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: status.ConfigMap.Name,
|
||||
},
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
// Defining volume for the Konnectivity kubeconfig
|
||||
@@ -249,13 +248,13 @@ func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, pod
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: status.Kubeconfig.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (k Konnectivity) Build(deployment *appsv1.Deployment, tenantControlPlane kamajiv1alpha1.TenantControlPlane) {
|
||||
k.buildKonnectivityContainer(tenantControlPlane.Spec.Addons.Konnectivity, tenantControlPlane.Spec.ControlPlane.Deployment.Replicas, &deployment.Spec.Template.Spec)
|
||||
k.buildKonnectivityContainer(tenantControlPlane.Spec.Addons.Konnectivity, *tenantControlPlane.Spec.ControlPlane.Deployment.Replicas, &deployment.Spec.Template.Spec)
|
||||
k.buildVolumeMounts(&deployment.Spec.Template.Spec)
|
||||
k.buildVolumes(tenantControlPlane.Status.Addons.Konnectivity, &deployment.Spec.Template.Spec)
|
||||
|
||||
|
||||
@@ -9,4 +9,5 @@ const (
|
||||
|
||||
ControlPlaneLabelKey = "kamaji.clastix.io/name"
|
||||
ControlPlaneLabelResource = "kamaji.clastix.io/component"
|
||||
ControllerLabelResource = "kamaji.clastix.io/certificate_lifecycle_controller"
|
||||
)
|
||||
|
||||
@@ -194,9 +194,11 @@ func generateCertificateKeyPairBytes(template *x509.Certificate, caCert *x509.Ce
|
||||
}
|
||||
|
||||
func checkCertificateValidity(cert x509.Certificate) bool {
|
||||
now := time.Now()
|
||||
// Avoiding waiting for the exact expiration date by creating a one-day gap
|
||||
notAfter := cert.NotAfter.After(time.Now().AddDate(0, 0, 1))
|
||||
notBefore := cert.NotBefore.Before(time.Now())
|
||||
|
||||
return now.Before(cert.NotAfter) && now.After(cert.NotBefore)
|
||||
return notAfter && notBefore
|
||||
}
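The rewritten check front-loads expiration by one day, so a certificate with less than 24 hours of validity left is already reported as invalid and rotation can start early; a small illustrative sketch of the effect (not part of the codebase):

```
// Sketch of the one-day gap introduced above: a certificate expiring within
// the next 24 hours is already considered invalid.
package sketch

import (
	"crypto/x509"
	"time"
)

func validWithGap(cert x509.Certificate) bool {
	notAfter := cert.NotAfter.After(time.Now().AddDate(0, 0, 1)) // at least one day of validity left
	notBefore := cert.NotBefore.Before(time.Now())               // already active

	return notAfter && notBefore
}

func example() (bool, bool) {
	soon := x509.Certificate{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(12 * time.Hour)}
	later := x509.Certificate{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(48 * time.Hour)}

	return validWithGap(soon), validWithGap(later) // false, true
}
```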
|
||||
|
||||
func checkPublicKeys(a rsa.PublicKey, b rsa.PublicKey) bool {
|
||||
|
||||
@@ -29,7 +29,7 @@ func NewStorageConnection(ctx context.Context, client client.Client, ds kamajiv1
|
||||
return NewMySQLConnection(*cc)
|
||||
case kamajiv1alpha1.KinePostgreSQLDriver:
|
||||
cc.TLSConfig.ServerName = cc.Endpoints[0].Host
|
||||
//nolint:contextcheck
|
||||
|
||||
return NewPostgreSQLConnection(*cc)
|
||||
case kamajiv1alpha1.EtcdDriver:
|
||||
return NewETCDConnection(*cc)
|
||||
|
||||
@@ -136,7 +136,7 @@ func (e *EtcdClient) DeleteDB(ctx context.Context, dbName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EtcdClient) RevokePrivileges(ctx context.Context, user, dbName string) error {
|
||||
func (e *EtcdClient) RevokePrivileges(ctx context.Context, _, dbName string) error {
|
||||
if _, err := e.Client.Auth.RoleDelete(ctx, dbName); err != nil {
|
||||
return errors.NewRevokePrivilegesError(err)
|
||||
}
|
||||
|
||||
@@ -216,13 +216,17 @@ func (c *MySQLConnection) DBExists(ctx context.Context, dbName string) (bool, er
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (c *MySQLConnection) GrantPrivilegesExists(ctx context.Context, user, dbName string) (bool, error) {
|
||||
func (c *MySQLConnection) GrantPrivilegesExists(_ context.Context, user, dbName string) (bool, error) {
|
||||
statementShowGrantsStatement := fmt.Sprintf(mysqlShowGrantsStatement, user)
|
||||
rows, err := c.db.Query(statementShowGrantsStatement)
|
||||
rows, err := c.db.Query(statementShowGrantsStatement) //nolint:sqlclosecheck
|
||||
if err != nil {
|
||||
return false, errors.NewGrantPrivilegesError(err)
|
||||
}
|
||||
|
||||
if err = rows.Err(); err != nil {
|
||||
return false, errors.NewGrantPrivilegesError(err)
|
||||
}
|
||||
|
||||
expected := fmt.Sprintf(mysqlGrantPrivilegesStatement, user, dbName)
|
||||
var grant string
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ func (r *PostgreSQLConnection) Migrate(ctx context.Context, tcp kamajiv1alpha1.T
|
||||
// Dumping the old datastore in a local buffer
|
||||
var buf bytes.Buffer
|
||||
|
||||
if _, err := r.switchDatabaseFn(tcp.Status.Storage.Setup.Schema).WithContext(ctx).CopyTo(&buf, "COPY kine TO STDOUT"); err != nil { //nolint:contextcheck
|
||||
if _, err := r.switchDatabaseFn(tcp.Status.Storage.Setup.Schema).WithContext(ctx).CopyTo(&buf, "COPY kine TO STDOUT"); err != nil {
|
||||
return fmt.Errorf("unable to copy from the origin datastore: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -107,7 +107,7 @@ func GeneratePublicKeyPrivateKeyPair(baseName string, config *Configuration) (*P
|
||||
}
|
||||
|
||||
func initPhaseCertsSA(config *Configuration) error {
|
||||
return certs.CreateServiceAccountKeyAndPublicKeyFiles(config.InitConfiguration.CertificatesDir, config.InitConfiguration.PublicKeyAlgorithm())
|
||||
return certs.CreateServiceAccountKeyAndPublicKeyFiles(config.InitConfiguration.CertificatesDir, config.InitConfiguration.EncryptionAlgorithmType())
|
||||
}
|
||||
|
||||
func initPhaseFromCA(kubeadmCert *certs.KubeadmCert, config *Configuration, certificate *x509.Certificate, signer crypto.Signer) error {
|
||||
|
||||
@@ -64,9 +64,9 @@ func CreateKubeadmInitConfiguration(params Parameters) (*Configuration, error) {
fmt.Sprintf("%s.%s.svc.cluster.local", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
params.TenantControlPlaneAddress,
}, params.TenantControlPlaneCertSANs...)
conf.APIServer.ControlPlaneComponent.ExtraArgs = map[string]string{
"etcd-compaction-interval": "0s",
"etcd-prefix": fmt.Sprintf("/%s", params.TenantControlPlaneName),
conf.APIServer.ControlPlaneComponent.ExtraArgs = []kubeadmapi.Arg{
{Name: "etcd-compaction-interval", Value: "0s"},
{Name: "etcd-prefix", Value: fmt.Sprintf("/%s", params.TenantControlPlaneName)},
}
conf.ClusterName = params.TenantControlPlaneName
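The ExtraArgs hunk above tracks kubeadm's move from a map[string]string to an ordered slice of name/value arguments (kubeadmapi.Arg). A minimal sketch of the conversion; the Arg struct is declared locally so the snippet stays self-contained, and the etcd-prefix value is a placeholder:

```go
package main

import "fmt"

// Arg mirrors the ordered name/value pair used by newer kubeadm APIs
// (kubeadmapi.Arg); declared locally to keep the sketch self-contained.
type Arg struct {
	Name  string
	Value string
}

// argsFromMap converts the old map form into the ordered slice form.
// Map iteration order is not deterministic, which is one reason the
// slice form is preferable for rendering stable command lines.
func argsFromMap(m map[string]string) []Arg {
	out := make([]Arg, 0, len(m))
	for name, value := range m {
		out = append(out, Arg{Name: name, Value: value})
	}
	return out
}

func main() {
	old := map[string]string{
		"etcd-compaction-interval": "0s",
		"etcd-prefix":              "/tenant-00", // placeholder tenant name
	}
	for _, a := range argsFromMap(old) {
		fmt.Printf("--%s=%s\n", a.Name, a.Value)
	}
}
```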
@@ -10,6 +10,9 @@ import (
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"

"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/utilities"
)

func buildCertificateDirectoryWithCA(ca CertificatePrivateKeyPair, directory string) error {
@@ -23,11 +26,8 @@ func buildCertificateDirectoryWithCA(ca CertificatePrivateKeyPair, directory str
}

keyPath := path.Join(directory, kubeadmconstants.CAKeyName)
if err := os.WriteFile(keyPath, ca.PrivateKey, os.FileMode(0o600)); err != nil {
return err
}

return nil
return os.WriteFile(keyPath, ca.PrivateKey, os.FileMode(0o600))
}

func CreateKubeconfig(kubeconfigName string, ca CertificatePrivateKeyPair, config *Configuration) ([]byte, error) {
@@ -46,6 +46,13 @@ func CreateKubeconfig(kubeconfigName string, ca CertificatePrivateKeyPair, confi
return os.ReadFile(path)
}

func IsKubeconfigValid(kubeconfigBytes []byte) bool {
return len(kubeconfigBytes) > 0
func IsKubeconfigValid(bytes []byte) bool {
kc, err := utilities.DecodeKubeconfigYAML(bytes)
if err != nil {
return false
}

ok, _ := crypto.IsValidCertificateKeyPairBytes(kc.AuthInfos[0].AuthInfo.ClientCertificateData, kc.AuthInfos[0].AuthInfo.ClientKeyData)

return ok
}
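IsKubeconfigValid now decodes the kubeconfig and verifies that the embedded client certificate and key actually belong together, instead of only checking for non-empty bytes. A rough standalone equivalent of that pairing check using crypto/tls; the helper name is an assumption, and the real code goes through crypto.IsValidCertificateKeyPairBytes:

```go
package kubeconfig

import "crypto/tls"

// certKeyPairMatches reports whether a PEM-encoded certificate and private
// key belong together; tls.X509KeyPair fails when they do not match.
func certKeyPairMatches(certPEM, keyPEM []byte) bool {
	_, err := tls.X509KeyPair(certPEM, keyPEM)
	return err == nil
}
```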
@@ -11,28 +11,28 @@ import (

type Discard struct{}

func (d Discard) PrintObj(obj runtime.Object, writer io.Writer) error {
func (d Discard) PrintObj(runtime.Object, io.Writer) error {
return nil
}

func (d Discard) Fprintf(writer io.Writer, format string, args ...interface{}) (n int, err error) {
func (d Discard) Fprintf(io.Writer, string, ...interface{}) (n int, err error) {
return
}

func (d Discard) Fprintln(writer io.Writer, args ...interface{}) (n int, err error) {
func (d Discard) Fprintln(io.Writer, ...interface{}) (n int, err error) {
return
}

func (d Discard) Printf(format string, args ...interface{}) (n int, err error) {
func (d Discard) Printf(string, ...interface{}) (n int, err error) {
return
}

func (d Discard) Println(args ...interface{}) (n int, err error) {
func (d Discard) Println(...interface{}) (n int, err error) {
return
}

func (d Discard) Flush(writer io.Writer, last bool) {
func (d Discard) Flush(io.Writer, bool) {
}

func (d Discard) Close(writer io.Writer) {
func (d Discard) Close(io.Writer) {
}
@@ -17,7 +17,7 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"

"github.com/clastix/kamaji/internal/utilities"
)
@@ -81,10 +81,10 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
},
Authentication: kubelettypes.KubeletAuthentication{
Anonymous: kubelettypes.KubeletAnonymousAuthentication{
Enabled: pointer.Bool(false),
Enabled: pointer.To(false),
},
Webhook: kubelettypes.KubeletWebhookAuthentication{
Enabled: pointer.Bool(true),
Enabled: pointer.To(true),
CacheTTL: zeroDuration,
},
X509: kubelettypes.KubeletX509Authentication{
@@ -110,9 +110,9 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
EvictionPressureTransitionPeriod: zeroDuration,
FileCheckFrequency: zeroDuration,
HealthzBindAddress: "127.0.0.1",
HealthzPort: pointer.Int32(10248),
HealthzPort: pointer.To(int32(10248)),
HTTPCheckFrequency: zeroDuration,
ImageGCHighThresholdPercent: pointer.Int32(100),
ImageGCHighThresholdPercent: pointer.To(int32(100)),
NodeStatusUpdateFrequency: zeroDuration,
NodeStatusReportFrequency: zeroDuration,
RotateCertificates: true,
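The import swap from k8s.io/utils/pointer to k8s.io/utils/ptr (aliased back to pointer) replaces the deprecated type-specific helpers with the generic ptr.To, which is what the Enabled, HealthzPort, and ImageGCHighThresholdPercent lines above now use. A minimal sketch of the new API; module wiring is assumed:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To is generic and replaces pointer.Bool, pointer.Int32, and friends.
	anonymousAuth := ptr.To(false)      // *bool
	healthzPort := ptr.To(int32(10248)) // *int32

	// ptr.Deref reads through a pointer with a fallback for nil.
	fmt.Println(ptr.Deref(anonymousAuth, true), ptr.Deref(healthzPort, 0))
}
```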
@@ -13,7 +13,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -287,7 +287,7 @@ func (k *KubeProxy) mutateDaemonSet(ctx context.Context, tenantClient client.Cli
ds.Spec.Template.Spec.Volumes[0].Name = k.daemonSet.Spec.Template.Spec.Volumes[0].Name
ds.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap = &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: k.daemonSet.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.Name},
DefaultMode: pointer.Int32(420),
DefaultMode: pointer.To(int32(420)),
}

ds.Spec.Template.Spec.Volumes[1].Name = k.daemonSet.Spec.Template.Spec.Volumes[1].Name
@@ -18,6 +18,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -93,6 +94,19 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
return err
}

r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()),
map[string]string{
constants.ControllerLabelResource: "x509",
},
))

if err := ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
logger.Error(err, "cannot set controller reference", "resource", r.GetName())

return err
}

if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isCAValid, err := crypto.VerifyCertificate(r.resource.Data[kubeadmconstants.APIServerCertName], secretCA.Data[kubeadmconstants.CACertName], x509.ExtKeyUsageServerAuth)
if err != nil {
@@ -138,8 +152,6 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k

utilities.SetObjectChecksum(r.resource, r.resource.Data)

r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))

return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
return nil
}
}
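The mutate hunk above now sets labels once via utilities.MergeMaps before the checksum comparison, instead of at the end of the function. A hedged sketch of what such a merge helper typically does; the function body and label keys here are illustrative, not the project's actual implementation:

```go
package main

import "fmt"

// mergeMaps combines string maps left to right, with later keys overriding
// earlier ones; illustrative of what a MergeMaps-style helper is assumed to do.
func mergeMaps(maps ...map[string]string) map[string]string {
	out := map[string]string{}
	for _, m := range maps {
		for k, v := range m {
			out[k] = v
		}
	}
	return out
}

func main() {
	labels := mergeMaps(
		map[string]string{"app.kubernetes.io/managed-by": "kamaji"}, // placeholder base labels
		map[string]string{"resource": "x509"},                       // placeholder override
	)
	fmt.Println(labels)
}
```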