Mirror of https://github.com/clastix/kamaji.git (synced 2026-03-02 01:30:43 +00:00)

Compare commits: helm-v0.9. ... helm-v0.9. (13 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 1ff03246c6 |  |
|  | 8335f645a5 |  |
|  | 70a791be74 |  |
|  | b0293c23b5 |  |
|  | 50bba9bb2e |  |
|  | f05f7eaf07 |  |
|  | bfd34ef47e |  |
|  | b73c7a20ed |  |
|  | 004441e77e |  |
|  | 0f85b6c534 |  |
|  | b674738f0d |  |
|  | 6dc3cd1876 |  |
|  | 96a57fefa5 |  |
Makefile (13 changed lines)
@@ -3,7 +3,7 @@
 # To re-generate a bundle for another specific version without changing the standard setup, you can:
 # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
 # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
-VERSION ?= 0.0.1
+VERSION ?= 0.1.0

 # CHANNELS define the bundle channels used in the bundle.
 # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -36,7 +36,7 @@ IMAGE_TAG_BASE ?= clastix.io/operator
 BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)

 # Image URL to use all building/pushing image targets
-IMG ?= clastix/kamaji:latest
+IMG ?= clastix/kamaji:v$(VERSION)

 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -91,6 +91,10 @@ KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
 	$(call install-kustomize,$(KUSTOMIZE),3.8.7)

+APIDOCS_GEN = $(shell pwd)/bin/crdoc
+apidocs-gen: ## Download crdoc locally if necessary.
+	$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@latest)
+
 ##@ Development

 manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
@@ -245,3 +249,8 @@ env:
 e2e: env load helm ginkgo ## Create a KinD cluster, install Kamaji on it and run the test suite.
 	$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
 	$(GINKGO) -v ./e2e
+
+##@ Document
+
+apidoc: apidocs-gen
+	$(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/apireference.md --template docs/templates/reference-cr.tmpl
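The new `apidocs-gen` and `apidoc` targets make the API reference reproducible locally; a sketch of the workflow, assuming Go and `make` are available on the workstation:

```bash
# First run fetches crdoc into ./bin, then renders the CRDs into docs/apireference.md.
make apidoc
```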
@@ -74,8 +74,8 @@ Here are a few:
 - [ ] Custom Prometheus metrics for monitoring and alerting
 - [x] `kine` integration for MySQL as datastore
 - [x] `kine` integration for PostgreSQL as datastore
 - [ ] Deeper `kubeadm` integration
-- [ ] Pooling of multiple `etcd` datastores
+- [x] Pool of multiple datastores
 - [ ] Automatic assigning of Tenant Control Plane to a datastore
 - [ ] Autoscaling of Tenant Control Plane pods
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.1
+version: 0.9.2

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: 0.1.0
+appVersion: v0.1.0

 home: https://github.com/clastix/kamaji
 sources: ["https://github.com/clastix/kamaji"]
@@ -1,6 +1,6 @@
 # kamaji

-(chart badge image: lost in mirror)
+(chart badge image: lost in mirror)

 Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.

@@ -110,7 +110,7 @@ Here the values you can override:
 | healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
 | image.pullPolicy | string | `"Always"` | |
 | image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
-| image.tag | string | `"latest"` | |
+| image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. |
 | imagePullSecrets | list | `[]` | |
 | livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"healthcheck"},"initialDelaySeconds":15,"periodSeconds":20}` | The livenessProbe for the controller container |
 | loggingDevel.enable | bool | `false` | (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |
@@ -9,8 +9,8 @@ image:
   # -- The container image of the Kamaji controller.
   repository: clastix/kamaji
   pullPolicy: Always
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: latest
+  # -- Overrides the image tag whose default is the chart appVersion.
+  tag:

 # -- A list of extra arguments to add to the kamaji controller default ones
 extraArgs: []
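Since `tag` is now empty by default, the deployed image follows the chart's `appVersion`; a sketch of overriding it at install time (the release and namespace names mirror the `e2e` target above):

```bash
# Omit --set image.tag=... to fall back to the appVersion default (v0.1.0).
helm upgrade --install kamaji ./charts/kamaji \
  --namespace kamaji-system --create-namespace \
  --set image.tag=v0.1.0
```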
@@ -1706,7 +1706,7 @@ spec:
         - --datastore=kamaji-etcd
         command:
         - /manager
-        image: clastix/kamaji:latest
+        image: clastix/kamaji:v0.1.0
        imagePullPolicy: Always
         livenessProbe:
           httpGet:
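A quick way to confirm the pinned tag after applying the manifest; the Deployment name below is an assumption, so adjust it to whatever `kubectl get deploy` reports:

```bash
# Expect clastix/kamaji:v0.1.0 rather than clastix/kamaji:latest.
kubectl -n kamaji-system get deployment kamaji \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```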
@@ -13,4 +13,4 @@ kind: Kustomization
 images:
 - name: controller
   newName: clastix/kamaji
-  newTag: latest
+  newTag: v0.1.0
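The same bump can be scripted rather than hand-edited; `kustomize edit` rewrites exactly this `images:` stanza when run in the directory holding the kustomization.yaml:

```bash
# Rewrites newTag for the image named "controller" in kustomization.yaml.
kustomize edit set image controller=clastix/kamaji:v0.1.0
```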
@@ -1,16 +0,0 @@
include etcd/Makefile

deploy_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))

.DEFAULT_GOAL := kamaji

.PHONY: etcd-cluster
reqs: etcd-cluster

.PHONY: kamaji
kamaji: reqs
	@kubectl apply -f $(deploy_path)/../../config/install.yaml

.PHONY: destroy
destroy: etcd-certificates/cleanup
	@kubectl delete -f $(deploy_path)/../../config/install.yaml
@@ -1,26 +0,0 @@
# Deploy Kamaji

## Quickstart with KinD

```sh
make -C kind
```

## Multi-tenant etcd cluster

> This assumes you already have a running Kubernetes cluster and kubeconfig.

```sh
make -C etcd
```

## Multi-tenant cluster using Kine

`kine` is an `etcd` shim that allows using a different datastore.

Kamaji currently supports the following backends:

- [MySQL](kine/mysql/README.md)
- [PostgreSQL](kine/postgresql/README.md)

> This assumes you already have a running Kubernetes cluster and kubeconfig.
@@ -1,680 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "vxlan"

  # Configure the MTU to use for workload interfaces and tunnels.
  # By default, MTU is auto-detected, and explicitly setting this field should not be required.
  # You can override auto-detection by providing a non-zero value.
  veth_mtu: "0"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-node
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - namespaces
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  verbs:
  - watch
  - list
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  - serviceaccounts
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods/status
  verbs:
  - patch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - globalfelixconfigs
  - felixconfigurations
  - bgppeers
  - globalbgpconfigs
  - bgpconfigurations
  - ippools
  - ipamblocks
  - globalnetworkpolicies
  - globalnetworksets
  - networkpolicies
  - networksets
  - clusterinformations
  - hostendpoints
  - blockaffinities
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ippools
  - felixconfigurations
  - clusterinformations
  verbs:
  - create
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - bgpconfigurations
  - bgppeers
  verbs:
  - create
  - update
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  - ipamblocks
  - ipamhandles
  verbs:
  - get
  - list
  - create
  - update
  - delete
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ipamconfigs
  verbs:
  - get
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  verbs:
  - watch
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - get

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-kube-controllers
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ippools
  verbs:
  - list
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  - ipamblocks
  - ipamhandles
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - hostendpoints
  verbs:
  - get
  - list
  - create
  - update
  - delete
- apiGroups:
  - crd.projectcalico.org
  resources:
  - clusterinformations
  verbs:
  - get
  - create
  - update
- apiGroups:
  - crd.projectcalico.org
  resources:
  - kubecontrollersconfigurations
  verbs:
  - get
  - create
  - update
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
      # Make sure calico-node gets scheduled on all nodes.
      - effect: NoSchedule
        operator: Exists
      # Mark the pod as a critical add-on for rescheduling.
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
      # This container performs upgrade from host-local IPAM to calico-ipam.
      # It can be deleted if this is a fresh installation, or if you have already
      # upgraded to use calico-ipam.
      - name: upgrade-ipam
        image: docker.io/calico/cni:v3.20.0
        command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        volumeMounts:
        - mountPath: /var/lib/cni/networks
          name: host-local-net-dir
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        securityContext:
          privileged: true
      # This container installs the CNI binaries
      # and CNI network config file on each node.
      - name: install-cni
        image: docker.io/calico/cni:v3.20.0
        command: ["/opt/cni/bin/install"]
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        # Name of the CNI config file to create.
        - name: CNI_CONF_NAME
          value: "10-calico.conflist"
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: cni_network_config
        # Set the hostname based on the k8s node name.
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # CNI MTU Config variable
        - name: CNI_MTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Prevents the container from sleeping forever.
        - name: SLEEP
          value: "false"
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
        securityContext:
          privileged: true
      # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
      # to communicate with Felix over the Policy Sync API.
      - name: flexvol-driver
        image: docker.io/calico/pod2daemon-flexvol:v3.20.0
        volumeMounts:
        - name: flexvol-driver-host
          mountPath: /host/driver
        securityContext:
          privileged: true
      containers:
      # Runs calico-node container on each Kubernetes node. This
      # container programs network policy and routes on each
      # host.
      - name: calico-node
        image: docker.io/calico/node:v3.20.0
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        # Use Kubernetes API as the backing datastore.
        - name: DATASTORE_TYPE
          value: "kubernetes"
        # Wait for the datastore.
        - name: WAIT_FOR_DATASTORE
          value: "true"
        # Set based on the k8s node name.
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # Choose the backend to use.
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        # Cluster type to identify the deployment type
        - name: CLUSTER_TYPE
          value: "k8s"
        # Auto-detect the BGP IP address.
        - name: IP
          value: "autodetect"
        # Enable IPIP
        - name: CALICO_IPV4POOL_IPIP
          value: "Never"
        # Enable or Disable VXLAN on the default IP pool.
        - name: CALICO_IPV4POOL_VXLAN
          value: "Always"
        # Set MTU for tunnel device used if ipip is enabled
        - name: FELIX_IPINIPMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Set MTU for the VXLAN tunnel device.
        - name: FELIX_VXLANMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Set MTU for the Wireguard tunnel device.
        - name: FELIX_WIREGUARDMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # The default IPv4 pool to create on startup if none exists. Pod IPs will be
        # chosen from this range. Changing this value after installation will have
        # no effect. This should fall within `--cluster-cidr`.
        - name: CALICO_IPV4POOL_CIDR
          value: "10.36.0.0/16"
        # Disable file logging so `kubectl logs` works.
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
        # Set Felix endpoint to host default action to ACCEPT.
        - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
          value: "ACCEPT"
        # Disable IPv6 on Kubernetes.
        - name: FELIX_IPV6SUPPORT
          value: "false"
        - name: FELIX_HEALTHENABLED
          value: "true"
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 250m
        livenessProbe:
          exec:
            command:
            - /bin/calico-node
            - -felix-live
            #- -bird-live
          periodSeconds: 10
          initialDelaySeconds: 10
          failureThreshold: 6
          timeoutSeconds: 10
        readinessProbe:
          exec:
            command:
            - /bin/calico-node
            - -felix-ready
            #- -bird-ready
          periodSeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        # For maintaining CNI plugin API credentials.
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
          readOnly: false
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /run/xtables.lock
          name: xtables-lock
          readOnly: false
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
        - mountPath: /var/lib/calico
          name: var-lib-calico
          readOnly: false
        - name: policysync
          mountPath: /var/run/nodeagent
        # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
        # parent directory.
        - name: sysfs
          mountPath: /sys/fs/
          # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
          # If the host is known to mount that filesystem already then Bidirectional can be omitted.
          mountPropagation: Bidirectional
        - name: cni-log-dir
          mountPath: /var/log/calico/cni
          readOnly: true
      volumes:
      # Used by calico-node.
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: var-run-calico
        hostPath:
          path: /var/run/calico
      - name: var-lib-calico
        hostPath:
          path: /var/lib/calico
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      - name: sysfs
        hostPath:
          path: /sys/fs/
          type: DirectoryOrCreate
      # Used to install CNI.
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
      # Used to access CNI logs.
      - name: cni-log-dir
        hostPath:
          path: /var/log/calico/cni
      # Mount in the directory for host-local IPAM allocations. This is
      # used when upgrading from host-local to calico-ipam, and can be removed
      # if not using the upgrade-ipam init container.
      - name: host-local-net-dir
        hostPath:
          path: /var/lib/cni/networks
      # Used to create per-pod Unix Domain Sockets
      - name: policysync
        hostPath:
          type: DirectoryOrCreate
          path: /var/run/nodeagent
      # Used to install Flex Volume Driver
      - name: flexvol-driver-host
        hostPath:
          type: DirectoryOrCreate
          path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      tolerations:
      # Mark the pod as a critical add-on for rescheduling.
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
      - name: calico-kube-controllers
        image: docker.io/calico/kube-controllers:v3.20.0
        resources:
        env:
        # Choose which controllers to run.
        - name: ENABLED_CONTROLLERS
          value: node
        - name: DATASTORE_TYPE
          value: kubernetes
        livenessProbe:
          exec:
            command:
            - /usr/bin/check-status
            - -l
          periodSeconds: 10
          initialDelaySeconds: 10
          failureThreshold: 6
          timeoutSeconds: 10
        readinessProbe:
          exec:
            command:
            - /usr/bin/check-status
            - -r
          periodSeconds: 10

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---

# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict

apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
(one file's diff suppressed because it is too large)
@@ -1,687 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use for workload interfaces and tunnels.
  # By default, MTU is auto-detected, and explicitly setting this field should not be required.
  # You can override auto-detection by providing a non-zero value.
  veth_mtu: "0"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-node
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - namespaces
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  verbs:
  - watch
  - list
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  - serviceaccounts
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods/status
  verbs:
  - patch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - globalfelixconfigs
  - felixconfigurations
  - bgppeers
  - globalbgpconfigs
  - bgpconfigurations
  - ippools
  - ipamblocks
  - globalnetworkpolicies
  - globalnetworksets
  - networkpolicies
  - networksets
  - clusterinformations
  - hostendpoints
  - blockaffinities
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ippools
  - felixconfigurations
  - clusterinformations
  verbs:
  - create
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - bgpconfigurations
  - bgppeers
  verbs:
  - create
  - update
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  - ipamblocks
  - ipamhandles
  verbs:
  - get
  - list
  - create
  - update
  - delete
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ipamconfigs
  verbs:
  - get
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  verbs:
  - watch
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - get

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: calico-kube-controllers
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - ippools
  verbs:
  - list
- apiGroups:
  - crd.projectcalico.org
  resources:
  - blockaffinities
  - ipamblocks
  - ipamhandles
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - watch
- apiGroups:
  - crd.projectcalico.org
  resources:
  - hostendpoints
  verbs:
  - get
  - list
  - create
  - update
  - delete
- apiGroups:
  - crd.projectcalico.org
  resources:
  - clusterinformations
  verbs:
  - get
  - create
  - update
- apiGroups:
  - crd.projectcalico.org
  resources:
  - kubecontrollersconfigurations
  verbs:
  - get
  - create
  - update
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
      # Make sure calico-node gets scheduled on all nodes.
      - effect: NoSchedule
        operator: Exists
      # Mark the pod as a critical add-on for rescheduling.
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
      # This container performs upgrade from host-local IPAM to calico-ipam.
      # It can be deleted if this is a fresh installation, or if you have already
      # upgraded to use calico-ipam.
      - name: upgrade-ipam
        image: docker.io/calico/cni:v3.20.0
        command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        volumeMounts:
        - mountPath: /var/lib/cni/networks
          name: host-local-net-dir
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        securityContext:
          privileged: true
      # This container installs the CNI binaries
      # and CNI network config file on each node.
      - name: install-cni
        image: docker.io/calico/cni:v3.20.0
        command: ["/opt/cni/bin/install"]
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        # Name of the CNI config file to create.
        - name: CNI_CONF_NAME
          value: "10-calico.conflist"
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: cni_network_config
        # Set the hostname based on the k8s node name.
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # CNI MTU Config variable
        - name: CNI_MTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Prevents the container from sleeping forever.
        - name: SLEEP
          value: "false"
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
        securityContext:
          privileged: true
      # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
      # to communicate with Felix over the Policy Sync API.
      - name: flexvol-driver
        image: docker.io/calico/pod2daemon-flexvol:v3.20.0
        volumeMounts:
        - name: flexvol-driver-host
          mountPath: /host/driver
        securityContext:
          privileged: true
      containers:
      # Runs calico-node container on each Kubernetes node. This
      # container programs network policy and routes on each
      # host.
      - name: calico-node
        image: docker.io/calico/node:v3.20.0
        envFrom:
        - configMapRef:
            # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
            name: kubernetes-services-endpoint
            optional: true
        env:
        # Use Kubernetes API as the backing datastore.
        - name: DATASTORE_TYPE
          value: "kubernetes"
        # Wait for the datastore.
        - name: WAIT_FOR_DATASTORE
          value: "true"
        # Set based on the k8s node name.
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # Choose the backend to use.
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        # Cluster type to identify the deployment type
        - name: CLUSTER_TYPE
          value: "k8s"
        # Auto-detect the BGP IP address.
        - name: IP
          value: "autodetect"
        - name: IP_AUTODETECTION_METHOD
          value: "can-reach=192.168.32.0"
        # Enable IPIP
        - name: CALICO_IPV4POOL_IPIP
          value: "Never"
        # Enable or Disable VXLAN on the default IP pool.
        - name: CALICO_IPV4POOL_VXLAN
          value: "Never"
        # Set MTU for tunnel device used if ipip is enabled
        - name: FELIX_IPINIPMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Set MTU for the VXLAN tunnel device.
        - name: FELIX_VXLANMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # Set MTU for the Wireguard tunnel device.
        - name: FELIX_WIREGUARDMTU
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: veth_mtu
        # The default IPv4 pool to create on startup if none exists. Pod IPs will be
        # chosen from this range. Changing this value after installation will have
        # no effect. This should fall within `--cluster-cidr`.
        # - name: CALICO_IPV4POOL_CIDR
        #   value: "192.168.0.0/16"
        # Disable file logging so `kubectl logs` works.
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
        # Set Felix endpoint to host default action to ACCEPT.
        - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
          value: "ACCEPT"
        # Disable IPv6 on Kubernetes.
        - name: FELIX_IPV6SUPPORT
          value: "false"
        - name: FELIX_HEALTHENABLED
          value: "true"
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 250m
        livenessProbe:
          exec:
            command:
            - /bin/calico-node
            - -felix-live
            - -bird-live
          periodSeconds: 10
          initialDelaySeconds: 10
          failureThreshold: 6
          timeoutSeconds: 10
        readinessProbe:
          exec:
            command:
            - /bin/calico-node
            - -felix-ready
            - -bird-ready
          periodSeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        # For maintaining CNI plugin API credentials.
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
          readOnly: false
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /run/xtables.lock
          name: xtables-lock
          readOnly: false
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
        - mountPath: /var/lib/calico
          name: var-lib-calico
          readOnly: false
        - name: policysync
          mountPath: /var/run/nodeagent
        # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
        # parent directory.
        - name: sysfs
          mountPath: /sys/fs/
          # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
          # If the host is known to mount that filesystem already then Bidirectional can be omitted.
          mountPropagation: Bidirectional
        - name: cni-log-dir
          mountPath: /var/log/calico/cni
          readOnly: true
      volumes:
      # Used by calico-node.
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: var-run-calico
        hostPath:
          path: /var/run/calico
      - name: var-lib-calico
        hostPath:
          path: /var/lib/calico
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      - name: sysfs
        hostPath:
          path: /sys/fs/
          type: DirectoryOrCreate
      # Used to install CNI.
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
      # Used to access CNI logs.
      - name: cni-log-dir
        hostPath:
          path: /var/log/calico/cni
      # Mount in the directory for host-local IPAM allocations. This is
      # used when upgrading from host-local to calico-ipam, and can be removed
      # if not using the upgrade-ipam init container.
      - name: host-local-net-dir
        hostPath:
          path: /var/lib/cni/networks
      # Used to create per-pod Unix Domain Sockets
      - name: policysync
        hostPath:
          type: DirectoryOrCreate
          path: /var/run/nodeagent
      # Used to install Flex Volume Driver
      - name: flexvol-driver-host
        hostPath:
          type: DirectoryOrCreate
          path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      # Mark the pod as a critical add-on for rescheduling.
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
      - name: calico-kube-controllers
        image: docker.io/calico/kube-controllers:v3.20.0
        env:
        # Choose which controllers to run.
        - name: ENABLED_CONTROLLERS
          value: node
        - name: DATASTORE_TYPE
          value: kubernetes
        resources:
        livenessProbe:
          exec:
            command:
            - /usr/bin/check-status
            - -l
          periodSeconds: 10
          initialDelaySeconds: 10
          failureThreshold: 6
          timeoutSeconds: 10
        readinessProbe:
          exec:
            command:
            - /usr/bin/check-status
            - -r
          periodSeconds: 10

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---

# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict

apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers

---
@@ -1,18 +1,21 @@
 # azure parameters
 export KAMAJI_REGION=westeurope
 export KAMAJI_RG=Kamaji
 # https://docs.microsoft.com/en-us/azure/aks/faq#why-are-two-resource-groups-created-with-aks
-export KAMAJI_CLUSTER=kamaji
-export KAMAJI_NODE_RG=MC_${KAMAJI_RG}_${KAMAJI_CLUSTER}_${KAMAJI_REGION}
+export KAMAJI_CLUSTER=kamaji
+export KAMAJI_VNET_NAME=kamaji-net
+export KAMAJI_VNET_ADDRESS=10.224.0.0/12
+export KAMAJI_SUBNET_NAME=kamaji-subnet
+export KAMAJI_SUBNET_ADDRESS=10.224.0.0/16

 # kamaji parameters
 export KAMAJI_NAMESPACE=kamaji-system

 # tenant cluster parameters
-export TENANT_NAMESPACE=tenants
+export TENANT_NAMESPACE=default
 export TENANT_NAME=tenant-00
 export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
-export TENANT_VERSION=v1.23.5
+export TENANT_VERSION=v1.25.0
 export TENANT_PORT=6443 # port used to expose the tenant api server
 export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
 export TENANT_POD_CIDR=10.36.0.0/16
@@ -21,10 +24,8 @@ export TENANT_DNS_SERVICE=10.96.0.10

 export TENANT_VM_SIZE=Standard_D2ds_v4
 export TENANT_VM_IMAGE=UbuntuLTS
 export TENANT_RG=$TENANT_NAME
 export TENANT_NSG=$TENANT_NAME-nsg
-export TENANT_VNET_NAME=$TENANT_NAME
-export TENANT_VNET_ADDRESS=172.12.0.0/16
 export TENANT_SUBNET_NAME=$TENANT_NAME-subnet
-export TENANT_SUBNET_ADDRESS=172.12.10.0/24
-export TENANT_VMSS=$TENANT_NAME-vmss
+export TENANT_SUBNET_ADDRESS=10.225.0.0/16
+export TENANT_VMSS=$TENANT_NAME-vmss
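The revised address plan (the tenant subnet now carved out of the shared Kamaji VNet) maps directly onto the Azure CLI; a sketch that assumes the resource group already exists and uses only the exported parameters:

```bash
# Shared VNet with the Kamaji subnet, then the tenant subnet in the same VNet.
az network vnet create \
  --resource-group "$KAMAJI_RG" --location "$KAMAJI_REGION" \
  --name "$KAMAJI_VNET_NAME" --address-prefixes "$KAMAJI_VNET_ADDRESS" \
  --subnet-name "$KAMAJI_SUBNET_NAME" --subnet-prefixes "$KAMAJI_SUBNET_ADDRESS"
az network vnet subnet create \
  --resource-group "$KAMAJI_RG" --vnet-name "$KAMAJI_VNET_NAME" \
  --name "$TENANT_SUBNET_NAME" --address-prefixes "$TENANT_SUBNET_ADDRESS"
```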
@@ -2,10 +2,10 @@
 export KAMAJI_NAMESPACE=kamaji-system

 # tenant cluster parameters
-export TENANT_NAMESPACE=tenants
+export TENANT_NAMESPACE=default
 export TENANT_NAME=tenant-00
 export TENANT_DOMAIN=clastix.labs
-export TENANT_VERSION=v1.23.5
+export TENANT_VERSION=v1.25.0
 export TENANT_PORT=6443 # port used to expose the tenant api server
 export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
 export TENANT_POD_CIDR=10.36.0.0/16
@@ -1,26 +0,0 @@
# Kine integration

[kine](https://github.com/k3s-io/kine) is an `etcd` shim that allows using a different datastore for your Kubernetes cluster.

Kamaji currently allows running a shared datastore using different MySQL and PostgreSQL schemas per Tenant.
This can help in overcoming the `etcd` limitations regarding scalability and cluster size, as well as with HA and replication.

## Kamaji additional CLI flags

Kamaji reads the datastore configuration from a cluster-scoped resource named `DataStore`, containing all the required details to secure a connection using a specific driver.

- [Example of an `etcd` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_etcd.yaml)
- [Example of a `MySQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_mysql.yaml)
- [Example of a `PostgreSQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_postgresql.yaml)

Once the datastore is running, and the `DataStore` has been created with the required details, we need to point Kamaji at it with the following flag, set to the resource name:

```
--datastore={.metadata.name}
```

## Drivers

Further details on the setup for each driver are available here:
- [MySQL/MariaDB](../deploy/kine/mysql/README.md)
- [PostgreSQL](../deploy/kine/postgresql/README.md)
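For orientation, a minimal `DataStore` sketch; the `spec` field names below are assumptions, so treat the linked sample manifests as the source of truth:

```bash
# Hypothetical etcd-backed DataStore matching the --datastore=kamaji-etcd flag
# seen in install.yaml; verify the schema against config/samples before use.
kubectl apply -f - <<EOF
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: kamaji-etcd
spec:
  driver: etcd
  endpoints:
  - etcd-0.etcd.kamaji-system.svc.cluster.local:2379
EOF
```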
@@ -15,6 +15,7 @@ mariadb-certificates:
 	chmod 644 $(ROOT_DIR)/certs/*

 mariadb-secret:
+	@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
 	@kubectl -n kamaji-system create secret generic mysql-config \
 		--from-file=$(ROOT_DIR)/certs/ca.crt --from-file=$(ROOT_DIR)/certs/ca.key \
 		--from-file=$(ROOT_DIR)/certs/server.key --from-file=$(ROOT_DIR)/certs/server.crt \
@@ -1,74 +0,0 @@
# MySQL as Kubernetes Storage

Kamaji offers the possibility of having a storage system other than `etcd` thanks to [kine](https://github.com/k3s-io/kine). One of the available implementations is [MySQL](https://www.mysql.com/).

The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore MySQL (or [MariaDB](https://mariadb.org/) in this case) is deployed into the local Kubernetes cluster in order to be used as storage for the tenants.

There is a Makefile to help with the process:

# Setup

Setup of the MySQL/MariaDB backend can be performed with a single command.

```bash
$ make mariadb
```

This action performs all the necessary steps to have MariaDB as the Kubernetes storage backend using kine.

```shell
rm -rf /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs && mkdir /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs
cfssl gencert -initca /home/prometherion/Documents/clastix/kamaji/deploy/mysql/ca-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca
2022/08/18 23:52:56 [INFO] generating a new CA key and certificate from CSR
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 310428005543054656774215122317606431230766314770
cfssl gencert -ca=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.crt -ca-key=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.key \
	-config=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/config.json -profile=server \
	/home/prometherion/Documents/clastix/kamaji/deploy/mysql/server-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/server
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 582698914718104852311252458344736030793138969927
chmod 644 /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/*
secret/mysql-config created
secret/kine-secret created
serviceaccount/mariadb created
service/mariadb created
deployment.apps/mariadb created
persistentvolumeclaim/pvc-mariadb created
```

## Certificate creation

```bash
$ make mariadb-certificates
```

Communication between kine and the backend is encrypted; therefore a CA, and a certificate signed by it, must be created.

## Secret Deployment

```bash
$ make mariadb-secret
```

The certificates created above and the MySQL configuration have to be made available to the cluster.
They are stored in the secret `kamaji-system:mysql-config`, used by the MySQL/MariaDB instance.

## Deployment

```bash
$ make mariadb-deployment
```

Finally, this starts the MySQL/MariaDB installation with all the required settings and configuration, such as the SSL connection.

# Cleanup

```bash
$ make mariadb-destroy
```
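Assuming the targets above completed, two quick checks that the backend is actually usable:

```bash
# The TLS material should be in the secret, and the MariaDB rollout complete.
kubectl -n kamaji-system get secret mysql-config
kubectl -n kamaji-system rollout status deployment/mariadb
```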
@@ -7,6 +7,7 @@ cnpg-setup:

 cnpg-deploy:
 	@kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager
+	@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
 	@kubectl -n kamaji-system apply -f postgresql.yaml
 	@while ! kubectl -n kamaji-system get secret postgresql-superuser > /dev/null 2>&1; do sleep 1; done

@@ -1,74 +0,0 @@
# PostgreSQL as Kubernetes Storage

Kamaji offers the possibility of having a storage system other than `etcd` thanks to [kine](https://github.com/k3s-io/kine).
One of the available implementations is [PostgreSQL](https://www.postgresql.org/).

The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore a PostgreSQL instance must be deployed in advance into the local Kubernetes cluster in order to be used as storage for the tenants.
For the sake of simplicity, the [cloudnative-pg](https://cloudnative-pg.io/) Operator is used to simplify its setup.

There is a Makefile to help with the process:

## Setup

```bash
$ make postgresql
```

This target installs `cloudnative-pg`, creates the PostgreSQL instance in the Kamaji Namespace, and generates the Secret resource required for the kine integration.

This action is idempotent and doesn't overwrite values if they already exist.

```shell
namespace/cnpg-system unchanged
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/clusters.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/poolers.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/scheduledbackups.postgresql.cnpg.io configured
serviceaccount/cnpg-manager unchanged
clusterrole.rbac.authorization.k8s.io/cnpg-manager configured
clusterrolebinding.rbac.authorization.k8s.io/cnpg-manager-rolebinding unchanged
configmap/cnpg-default-monitoring unchanged
service/cnpg-webhook-service unchanged
deployment.apps/cnpg-controller-manager unchanged
mutatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-mutating-webhook-configuration configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-validating-webhook-configuration configured
deployment "cnpg-controller-manager" successfully rolled out
cluster.postgresql.cnpg.io/postgresql unchanged
secret/postgres-root-cert created
```

## Operator setup

```bash
$ make cnpg-setup
```

This target applies all the required `cloudnative-pg` manifests: the CRDs, the required RBAC, and the Deployment.

Release [v1.16.0](https://github.com/cloudnative-pg/cloudnative-pg/releases/tag/v1.16.0) has been tested successfully.

## SSL certificate Secret generation

```bash
$ make postgresql-secret
```

This target downloads the `kubectl-cnpg` utility locally to generate an SSL certificate required to secure the connection to the PostgreSQL instance.

## Certificate generation

```bash
$ make postgresql-secret
```

Generates the Certificate required to connect to the DataStore.

## Teardown

```bash
$ make postgresql-destroy
```

This deletes the `cloudnative-pg` Operator, along with any instance and related secrets.

This action is idempotent.
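The secret generation step boils down to one plugin invocation; a sketch with names taken from the log above (`postgresql` cluster, `postgres-root-cert` secret), assuming the cnpg kubectl plugin is installed:

```bash
# Issue a client certificate secret signed by the cluster CA.
kubectl cnpg certificate postgres-root-cert \
  --cnpg-cluster postgresql --cnpg-user postgres -n kamaji-system
```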
@@ -28,5 +28,5 @@ runcmd:
 - sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
 - echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
 - sudo apt update
-- sudo apt install -y kubelet=1.23.5-00 kubeadm=1.23.5-00 kubectl=1.23.5-00
-- sudo apt-mark hold kubelet kubeadm kubectl
+- sudo apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
+- sudo apt-mark hold kubelet kubeadm kubectl containerd
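On a provisioned node, the pins can be verified with standard apt tooling:

```bash
# Expect kubelet, kubeadm, kubectl and containerd in the held list.
apt-mark showhold
```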
@@ -4,10 +4,15 @@
 - [Architecture](./architecture.md)
 - [Getting started](./getting-started-with-kamaji.md)
 - Guides:
-  - [Deploy Kamaji](./kamaji-deployment-guide.md)
+  - [Deploy Kamaji on generic infrastructure](./kamaji-deployment-guide.md)
   - [Deploy Kamaji on Azure](./kamaji-azure-deployment-guide.md)
+  - Deploy Kamaji on AWS
+  - Deploy Kamaji on GCP
+  - Deploy Kamaji on OpenStack
   - [Setup Konnectivity service](./konnectivity.md)
+  - [MySQL as Kamaji datastore](./mysql-datastore.md)
+  - [PostgreSQL as Kamaji datastore](./postgresql-datastore.md)
 - [Tenant Cluster Upgrade](./upgrade.md)
 - [Reference](./reference.md)
 - [CNCF Conformance](./conformance.md)
 - [Versioning](./versioning.md)
4185
docs/apireference.md
Normal file
4185
docs/apireference.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -9,17 +9,23 @@ High Availability and rolling updates of the Tenant Control Plane pods are provi

Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` in short.

All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF Conformance](./conformance.md).

## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you could expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.

All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
We have on the roadmap Cluster API support, as well as a Terraform provider, so that you can create _“tenant clusters”_ in a declarative way.

## Save the state
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves the state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn’t scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal for a resilient and cost-optimized solution at the same time. As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we deploy a multi-tenant `etcd` cluster storing the state of multiple tenant clusters.
## Datastores
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves the state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn’t scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal for a resilient and cost-optimized solution at the same time.

With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that we have to operate an external `etcd` cluster, in addition to `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its data. Also, there are size limits in `etcd`, defaulted to 2GB and configurable to a maximum of 8GB. We’re solving this issue by pooling multiple `etcd` clusters together and sharding the Tenant Control Planes.
As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we can deploy a multi-tenant `etcd` datastore to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing Tenant datastores. With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that we have to operate an external datastore, in addition to `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its data.

Optionally, Kamaji offers the possibility of using a different storage system than `etcd` to save the state of the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration.
### Other storage drivers
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“tenant clusters”_.

### Pooling
By default, Kamaji expects to persist all the _“tenant clusters”_ data in a single datastore, which can be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that need different resources or a different tiering. Pooling multiple datastores is an option you can leverage for a very large set of _“tenant clusters”_, so you can distribute the load properly. As a future improvement, we have a _datastore scheduler_ feature on the roadmap, so that Kamaji itself can automatically assign a _“tenant cluster”_ to the best datastore in the pool.
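
A minimal sketch of what such a declarative datastore definition can look like is shown below; field names and values are illustrative assumptions, and the authoritative examples live under `config/samples` in this repository.

```yaml
# Hypothetical DataStore backed by the default multi-tenant etcd.
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: default
spec:
  driver: etcd    # assumption: kine-backed MySQL or PostgreSQL drivers are the alternatives
  endpoints:
    - etcd-0.etcd.kamaji-system.svc.cluster.local:2379
```

Each Tenant Control Plane can then be pointed at the datastore of its tier, which is how the pooling described above is realized.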

## Requirements of design
These are the design requirements behind Kamaji:

60
docs/conformance.md
Normal file
@@ -0,0 +1,60 @@
# CNCF Conformance
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation - CNCF - provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/). All the _“tenant clusters”_ built with Kamaji are CNCF conformant.

The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the [kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.

## Running the conformance tests

The standard tool for running CNCF conformance tests is [Sonobuoy](https://github.com/vmware-tanzu/sonobuoy). Sonobuoy is regularly built and kept up to date to execute against all currently supported versions of Kubernetes.

Download a [binary release](https://github.com/vmware-tanzu/sonobuoy/releases) of the CLI.

Make sure to access your tenant cluster:

```
export KUBECONFIG=tenant.kubeconfig
```

Deploy a Sonobuoy pod to your tenant cluster with:

```
sonobuoy run --mode=certified-conformance
```

> You can run the command synchronously by adding the flag `--wait`, but be aware that running the conformance tests can take an hour or more.

View actively running pods:

```
sonobuoy status
```

To inspect the logs:

```
sonobuoy logs -f
```

Once `sonobuoy status` shows the run as `completed`, copy the output directory from the main Sonobuoy pod to a local directory:

```
outfile=$(sonobuoy retrieve)
```

This copies a single `.tar.gz` snapshot from the Sonobuoy pod into your local `.` directory. Extract the contents into `./results` with:

```
mkdir ./results; tar xzf $outfile -C ./results
```
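
If your Sonobuoy version provides the `results` subcommand, you can also get a pass/fail summary straight from the snapshot without extracting it:

```
sonobuoy results $outfile
```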

To clean up Kubernetes objects created by Sonobuoy, run:

```
sonobuoy delete
```
@@ -1,6 +1,6 @@
# Setup a minimal Kamaji for development

This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development scopes. Please refer to the [Kamaji documentation](../README.md) for understanding all the terms used in this guide, as for example: `admin cluster` and `tenant control plane`.
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development scopes. Please refer to the [Kamaji documentation](../concepts.md) for understanding all the terms used in this guide, as for example: `admin cluster`, `tenant cluster`, and `tenant control plane`.

## Pre-requisites

@@ -15,6 +15,7 @@ We assume you have installed on your workstation:
- [cfssl](https://github.com/cloudflare/cfssl)
- [cfssljson](https://github.com/cloudflare/cfssl)


> Starting from Kamaji v0.0.2, `kubectl` and `kubeadm` need to be at least version `v1.25.0`:
> this is required due to changes introduced in the Kubernetes 1.25 release regarding the `kubelet-config` ConfigMap required for the node join.

@@ -24,9 +25,9 @@ The instance of Kamaji is made of a single node hosting:

- admin control-plane
- admin worker
- multi-tenant etcd cluster
- multi-tenant datastore

### Standard
### Standard installation

You can install your KinD cluster, multi-tenant etcd cluster, and the Kamaji operator with a **single command**:

@@ -38,29 +39,55 @@ Now you can [create your first `TenantControlPlane`](#deploy-tenant-control-plan

### Data store-specific

#### ETCD
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like `MySQL` or `PostgreSQL` compatible databases.

The multi-tenant etcd cluster is deployed as a StatefulSet into the Kamaji node.

Run `make reqs` to setup Kamaji's requisites on KinD:
First, setup a KinD cluster:

```bash
$ make -C deploy/kind reqs
$ make -C deploy/kind kind
```

At this moment you will have your KinD up and running and ETCD cluster in multitenant mode.
#### ETCD

Deploy a multi-tenant `ETCD` cluster into the Kamaji node:

```bash
$ make -C deploy/kind etcd-cluster
```

Now you're ready to [install Kamaji operator](#install-kamaji).

#### Kine
#### MySQL

> The MySQL-compatible cluster provisioning is omitted here.
Deploy a MySQL/MariaDB backend into the Kamaji node:

Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like MySQL or PostgreSQL compatible databases.
```bash
$ make -C deploy/kine/mysql mariadb
```

Read more in the provided [guide](../deploy/kine/README.md).
Adjust the [Kamaji install manifest](../config/install.yaml) according to the example of a [MySQL DataStore](../config/samples/kamaji_v1alpha1_datastore_mysql.yaml) and make sure Kamaji uses the proper datastore name:

Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect to Kine and a compatible database using the proper driver, you can now install it.
```
--datastore={.metadata.name}
```

Now you're ready to [install Kamaji operator](#install-kamaji).

#### PostgreSQL

Deploy a PostgreSQL backend into the Kamaji node:

```bash
$ make -C deploy/kine/postgresql postgresql
```

Adjust the [Kamaji install manifest](../config/install.yaml) according to the example of a [PostgreSQL DataStore](../config/samples/kamaji_v1alpha1_datastore_postgresql.yaml) and make sure Kamaji uses the proper datastore name:

```
--datastore={.metadata.name}
```

Now you're ready to [install Kamaji operator](#install-kamaji).

### Install Kamaji

@@ -68,6 +95,8 @@ Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect t
$ kubectl apply -f config/install.yaml
```

> If you experience errors while applying the manifest, such as `resource mapping not found ... ensure CRDs are installed first`, just apply it again.
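
Once applied, you can check that the controller is up before moving on (pod names and counts will differ in your environment):

```bash
$ kubectl -n kamaji-system get pods
```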

### Deploy Tenant Control Plane

Now it is the moment to deploy your first tenant control plane.
@@ -150,7 +179,7 @@ $ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/insta
### Join worker nodes

```bash
$ make kamaji-kind-worker-join
$ make -C deploy/kind kamaji-kind-worker-join
```

> To add more worker nodes, run the command above again.

@@ -163,12 +192,12 @@ NAME STATUS ROLES AGE VERSION
d2d4b468c9de   Ready   <none>   44s   v1.23.4
```

> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`.

The tenant control plane provisioning is now complete in a minimal Kamaji setup based on KinD. You can now develop, test, and experiment with Kamaji.

## Cleanup

```bash
$ make destroy
$ make -C deploy/kind destroy
```

@@ -1,12 +1,19 @@
# Setup Kamaji on Azure
This guide will lead you through the process of creating a working Kamaji setup on MS Azure. It requires:
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.

- one bootstrap local workstation
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, you are encouraged to build suitable, auditable GitOps deployment processes around your final infrastructure.

The guide requires:

- one bootstrap workstation
- an AKS Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of Azure virtual machines to host `Tenant`s' workloads

## Summary

* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -21,10 +28,10 @@ cd kamaji/deploy

We assume you have installed on your workstation:

- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)

Make sure you have a valid Azure subscription, and login to Azure:
@@ -47,20 +54,37 @@ az group create \
  --name $KAMAJI_RG \
  --location $KAMAJI_REGION

az network vnet create \
  --resource-group $KAMAJI_RG \
  --name $KAMAJI_VNET_NAME \
  --location $KAMAJI_REGION \
  --address-prefix $KAMAJI_VNET_ADDRESS

az network vnet subnet create \
  --resource-group $KAMAJI_RG \
  --name $KAMAJI_SUBNET_NAME \
  --vnet-name $KAMAJI_VNET_NAME \
  --address-prefixes $KAMAJI_SUBNET_ADDRESS

KAMAJI_SUBNET_ID=$(az network vnet subnet show \
  --resource-group ${KAMAJI_RG} \
  --vnet-name ${KAMAJI_VNET_NAME} \
  --name ${KAMAJI_SUBNET_NAME} \
  --query id --output tsv)

az aks create \
  --resource-group $KAMAJI_RG \
  --name $KAMAJI_CLUSTER \
  --location $KAMAJI_REGION \
  --vnet-subnet-id $KAMAJI_SUBNET_ID \
  --zones 1 2 3 \
  --node-count 3 \
  --nodepool-name $KAMAJI_CLUSTER \
  --ssh-key-value @~/.ssh/id_rsa.pub \
  --no-wait
  --nodepool-name $KAMAJI_CLUSTER
```

Once the cluster formation succeeds, get credentials to access the cluster as admin:

```
```bash
az aks get-credentials \
  --resource-group $KAMAJI_RG \
  --name $KAMAJI_CLUSTER
@@ -68,41 +92,43 @@ az aks get-credentials \

And check you can access:

```
```bash
kubectl cluster-info
```

## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. The [Helm Chart](../charts/kamaji/) provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.

### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters.
Install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence.
The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`.
However, an externally managed `etcd` is highly recommended.
If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.

Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/README.md).

### Install with Helm Chart
Install with `helm` in a dedicated namespace of the Admin cluster:
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to setup a managed multi-tenant `etcd` as a 3 replicas StatefulSet with data persistence:

```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```

The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as a MySQL or PostgreSQL compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.

## Install Kamaji Controller
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and the [Helm Chart](../charts/kamaji).

Install with `helm` using an unmanaged `etcd` as datastore:

```bash
kubectl -n kamaji-system get pods
NAME                      READY   STATUS    RESTARTS   AGE
etcd-0                    1/1     Running   0          120m
etcd-1                    1/1     Running   0          120m
etcd-2                    1/1     Running   0          119m
kamaji-857fcdf599-4fb2p   2/2     Running   0          120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```

You just turned your AKS cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:

```
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```

Congratulations! You just turned your Azure Kubernetes AKS cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.

## Create Tenant Cluster

@@ -135,17 +161,17 @@ spec:
  resources:
    apiServer:
      requests:
        cpu: 500m
        cpu: 250m
        memory: 512Mi
      limits: {}
    controllerManager:
      requests:
        cpu: 250m
        cpu: 125m
        memory: 256Mi
      limits: {}
    scheduler:
      requests:
        cpu: 250m
        cpu: 125m
        memory: 256Mi
      limits: {}
  service:
@@ -198,31 +224,30 @@ spec:
    type: LoadBalancer
EOF

kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```

Make sure:

- the annotation `service.beta.kubernetes.io/azure-load-balancer-internal=true` is set on the `tcp` service. It tells Azure to expose the service within an internal loadbalancer.

- the annotation `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
- the annotation `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
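
On the rendered Service, these annotations sit under `metadata.annotations`. A minimal sketch (illustrative only, not a manifest to apply as-is):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: tenant-00
  annotations:
    service.beta.kubernetes.io/azure-dns-label-name: tenant-00
spec:
  type: LoadBalancer
```
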
### Working with Tenant Control Plane

Check the access to the Tenant Control Plane:

```
```bash
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/healthz
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/version
```

Let's retrieve the `kubeconfig` in order to work with it:

```
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
  | jq -r '.data["admin.conf"]' \
  | base64 -d \
  | base64 --decode \
  > ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig

kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \
@@ -248,118 +273,66 @@ NAME ENDPOINTS AGE
kubernetes   10.240.0.100:6443   57m
```

### Prepare the Infrastructure for the Tenant virtual machines
Kamaji provides Control Plane as a Service, so the tenant user can join their own virtual machines as worker nodes. Each tenant can place their virtual machines in a dedicated Azure virtual network.
### Preparing Worker Nodes to join
Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, as for example, Terraform.

Prepare the Tenant infrastructure:

```
az group create \
  --name $TENANT_RG \
  --location $KAMAJI_REGION

az network nsg create \
  --resource-group $TENANT_RG \
  --name $TENANT_NSG

az network nsg rule create \
  --resource-group $TENANT_RG \
  --nsg-name $TENANT_NSG \
  --name $TENANT_NSG-ssh \
  --protocol tcp \
  --priority 1000 \
  --destination-port-range 22 \
  --access allow

az network vnet create \
  --resource-group $TENANT_RG \
  --name $TENANT_VNET_NAME \
  --address-prefix $TENANT_VNET_ADDRESS \
  --subnet-name $TENANT_SUBNET_NAME \
  --subnet-prefix $TENANT_SUBNET_ADDRESS

az network vnet subnet create \
  --resource-group $TENANT_RG \
  --vnet-name $TENANT_VNET_NAME \
  --name $TENANT_SUBNET_NAME \
  --address-prefixes $TENANT_SUBNET_ADDRESS \
  --network-security-group $TENANT_NSG
```

Connection between the Tenant virtual network and the Kamaji AKS virtual network leverages [Azure Network Peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview).

Enable the network peering between the Tenant Virtual Network and the Kamaji AKS Virtual Network:
Create an Azure VM Scale Set to host worker nodes:

```bash
KAMAJI_VNET_NAME=`az network vnet list -g $KAMAJI_NODE_RG --query [].name --out tsv`
KAMAJI_VNET_ID=`az network vnet list -g $KAMAJI_NODE_RG --query [].id --out tsv`
TENANT_VNET_ID=`az network vnet list -g $TENANT_RG --query [].id --out tsv`

az network vnet peering create \
  --resource-group $TENANT_RG \
  --name $TENANT_NAME-$KAMAJI_CLUSTER \
  --vnet-name $TENANT_VNET_NAME \
  --remote-vnet $KAMAJI_VNET_ID \
  --allow-vnet-access

az network vnet peering create \
  --resource-group $KAMAJI_NODE_RG \
  --name $KAMAJI_CLUSTER-$TENANT_NAME \
az network vnet subnet create \
  --resource-group $KAMAJI_RG \
  --name $TENANT_SUBNET_NAME \
  --vnet-name $KAMAJI_VNET_NAME \
  --remote-vnet $TENANT_VNET_ID \
  --allow-vnet-access
```
  --address-prefixes $TENANT_SUBNET_ADDRESS

[Azure Network Security Groups](https://docs.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) can be used to control the traffic between the Tenant virtual network and the Kamaji AKS virtual network for a stronger isolation. See the required [ports and protocols](https://kubernetes.io/docs/reference/ports-and-protocols/) between the Kubernetes control plane and worker nodes.

### Create the tenant virtual machines
Create an Azure VM Scale Set to host virtual machines:

```
az vmss create \
  --name $TENANT_VMSS \
  --resource-group $TENANT_RG \
  --resource-group $KAMAJI_RG \
  --image $TENANT_VM_IMAGE \
  --public-ip-per-vm \
  --vnet-name $TENANT_VNET_NAME \
  --vnet-name $KAMAJI_VNET_NAME \
  --subnet $TENANT_SUBNET_NAME \
  --ssh-key-value @~/.ssh/id_rsa.pub \
  --computer-name-prefix $TENANT_NAME- \
  --nsg $TENANT_NSG \
  --custom-data ./tenant-cloudinit.yaml \
  --instance-count 0
  --load-balancer "" \
  --instance-count 0

az vmss update \
  --resource-group $TENANT_RG \
  --resource-group $KAMAJI_RG \
  --name $TENANT_VMSS \
  --set virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].enableIPForwarding=true

az vmss scale \
  --resource-group $TENANT_RG \
  --resource-group $KAMAJI_RG \
  --name $TENANT_VMSS \
  --new-capacity 3
```

### Join the tenant virtual machines to the tenant control plane
The current approach for joining nodes is to use the `kubeadm` one; therefore, we will create a bootstrap token to perform the action:
The current approach for joining nodes is to use `kubeadm` and therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire join command in a variable:

```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r '.spec.loadBalancerIP')

JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
```

A bash loop will be used to join all the available nodes.

```bash
HOSTS=($(az vmss list-instance-public-ips \
  --resource-group $TENANT_RG \
  --name $TENANT_VMSS \
  --query "[].ipAddress" \
  --output tsv))
VMIDS=($(az vmss list-instances \
  --resource-group $KAMAJI_RG \
  --name $TENANT_VMSS \
  --query [].instanceId \
  --output tsv))

for i in ${!HOSTS[@]}; do
  HOST=${HOSTS[$i]}
  echo $HOST
  ssh ${USER}@${HOST} -t ${JOIN_CMD};
for i in ${!VMIDS[@]}; do
  VMID=${VMIDS[$i]}
  az vmss run-command create \
    --name join-tenant-control-plane \
    --vmss-name $TENANT_VMSS \
    --resource-group $KAMAJI_RG \
    --instance-id ${VMID} \
    --script "${JOIN_CMD}"
done
```

@@ -369,51 +342,43 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME               STATUS     ROLES    AGE    VERSION
tenant-00-000000   NotReady   <none>   112s   v1.23.5
tenant-00-000002   NotReady   <none>   92s    v1.23.5
tenant-00-000003   NotReady   <none>   71s    v1.23.5
tenant-00-000000   NotReady   <none>   112s   v1.25.0
tenant-00-000002   NotReady   <none>   92s    v1.25.0
tenant-00-000003   NotReady   <none>   71s    v1.25.0
```

The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your choice.

Download the latest stable Calico manifest:

```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico-azure.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```

And after a while, `kube-system` pods will be running.
As per [documentation](https://projectcalico.docs.tigera.io/reference/public-cloud/azure), Calico in VXLAN mode is supported on Azure, while IPIP packets are blocked by the Azure network fabric. Make sure you edit the manifest above and set the following variables, as shown in the fragment after this list:

- `CLUSTER_TYPE="k8s"`
- `CALICO_IPV4POOL_IPIP="Never"`
- `CALICO_IPV4POOL_VXLAN="Always"`
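
In the manifest, these settings appear as environment variables on the `calico-node` container; the relevant fragment looks like this sketch:

```yaml
# env entries of the calico-node container in calico.yaml
- name: CLUSTER_TYPE
  value: "k8s"
- name: CALICO_IPV4POOL_IPIP
  value: "Never"
- name: CALICO_IPV4POOL_VXLAN
  value: "Always"
```
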
Apply to the tenant cluster:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get po -n kube-system

NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-8594699699-dlhbj   1/1     Running   0          3m
calico-node-kxf6n                          1/1     Running   0          3m
calico-node-qtdlw                          1/1     Running   0          3m
coredns-64897985d-2v5lc                    1/1     Running   0          5m
coredns-64897985d-nq276                    1/1     Running   0          5m
kube-proxy-cwdww                           1/1     Running   0          3m
kube-proxy-m48v4                           1/1     Running   0          3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```

And the nodes will be ready
And after a while, the nodes will be ready:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME               STATUS   ROLES    AGE     VERSION
tenant-00-000000   Ready    <none>   3m38s   v1.23.5
tenant-00-000002   Ready    <none>   3m18s   v1.23.5
tenant-00-000003   Ready    <none>   2m57s   v1.23.5
tenant-00-000000   Ready    <none>   3m38s   v1.25.0
tenant-00-000002   Ready    <none>   3m18s   v1.25.0
tenant-00-000003   Ready    <none>   2m57s   v1.25.0
```

## Cleanup
To get rid of the Tenant infrastructure, remove the RESOURCE_GROUP:

```
az group delete --name $TENANT_RG --yes --no-wait
```

To get rid of the Kamaji infrastructure, remove the RESOURCE_GROUP:

```
@@ -1,14 +1,19 @@
# Setup Kamaji
This guide will lead you through the process of creating a working Kamaji setup on a generic Kubernetes cluster. It requires:
# Setup Kamaji on a generic infrastructure
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure, whether virtual or bare metal.

- one bootstrap local workstation
- a Kubernetes cluster 1.22+, to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host Tenants' workloads
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, you are encouraged to build suitable, auditable GitOps deployment processes around your final infrastructure.

> In this guide, we assume the machines are running `Ubuntu 20.04`.
The guide requires:

- one bootstrap workstation
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host `Tenant`s' workloads

## Summary

* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -23,10 +28,10 @@ cd kamaji/deploy

We assume you have installed on your workstation:

- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)

## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of the whole Kamaji setup, including all Tenant clusters.
@@ -37,42 +42,48 @@ Throughout the following instructions, shell variables are used to indicate valu
source kamaji.env
```

Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide at least:

- CNI module installed, e.g. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenants' `etcd`.
- CSI module installed with a Storage Class for the Tenants' `etcd`. Local Persistent Volumes are an option.
- Support for LoadBalancer Service Type, or alternatively, an Ingress Controller, e.g. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Monitoring Stack, e.g. [Prometheus](https://github.com/prometheus-community).

Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into the Kamaji Admin Cluster.
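
A quick sanity check of reachability and permissions before proceeding (a sketch; the second command simply asks RBAC whether you hold unrestricted rights):

```bash
kubectl cluster-info
kubectl auth can-i '*' '*'
```
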
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. The [Helm Chart](../charts/kamaji/) provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.

### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. Install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence. The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`. However, an externally managed `etcd` is highly recommended. If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.

Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like a MySQL compatible database, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/mysql/README.md).

### Install with Helm Chart
Install with `helm` in a dedicated namespace of the Admin cluster:
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to setup a managed multi-tenant `etcd` as a 3 replicas StatefulSet with data persistence:

```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```

The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as a MySQL or PostgreSQL compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.

## Install Kamaji Controller
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and the [Helm Chart](../charts/kamaji).

Install with `helm` using an unmanaged `etcd` as datastore:

```bash
kubectl -n kamaji-system get pods
NAME                      READY   STATUS    RESTARTS   AGE
etcd-0                    1/1     Running   0          120m
etcd-1                    1/1     Running   0          120m
etcd-2                    1/1     Running   0          119m
kamaji-857fcdf599-4fb2p   2/2     Running   0          120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```

You just turned your Kubernetes cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:

```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```

Congratulations! You just turned your Kubernetes cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.

## Create Tenant Cluster

@@ -101,17 +112,17 @@ spec:
  resources:
    apiServer:
      requests:
        cpu: 500m
        cpu: 250m
        memory: 512Mi
      limits: {}
    controllerManager:
      requests:
        cpu: 250m
        cpu: 125m
        memory: 256Mi
      limits: {}
    scheduler:
      requests:
        cpu: 250m
        cpu: 125m
        memory: 256Mi
      limits: {}
  service:
@@ -146,8 +157,7 @@ spec:
  limits: {}
EOF

kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```

After a few minutes, check the created resources in the tenant's namespace; when ready, it will look similar to the following:
@@ -171,39 +181,8 @@ service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/T

The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines, and they are exposed through a service on port `6443` of the worker nodes in the Admin cluster.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: tenant-00
spec:
  clusterIP: 10.32.233.177
  loadBalancerIP: 192.168.32.240
  ports:
  - name: kube-apiserver
    nodePort: 31073
    port: 6443
    protocol: TCP
    targetPort: 6443
  - name: konnectivity-server
    nodePort: 32125
    port: 8132
    protocol: TCP
    targetPort: 8132
  selector:
    kamaji.clastix.io/soot: tenant-00
  type: LoadBalancer
```

The `LoadBalancer` service type is used to expose the Tenant Control Plane. However, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case. High Availability and rolling updates of the Tenant Control Plane are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.

### Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.

This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes. After worker nodes join the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker nodes traffic goes through these connections.

> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.

### Working with Tenant Control Plane

Collect the external IP address of the `tcp` service:
@@ -224,7 +203,7 @@ The `kubeconfig` required to access the Tenant Control Plane is stored in a secr
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
  | jq -r '.data["admin.conf"]' \
  | base64 -d \
  | base64 --decode \
  > ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```

@@ -257,17 +236,17 @@ And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.

### Preparing Worker Nodes to join

Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster APIs and other IaC tools.
Currently Kamaji does not provide any helper for creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, as for example, Terraform.

Use the bash script `nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
Use the simple bash script `nodes-prerequisites.sh`, provided in this repo, to install the dependencies on all the worker nodes:

- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version

> Warning: we assume worker nodes are machines running `Ubuntu 20.04`
> Warning: the script assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.

Run the installation script:
Run the script:

```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
@@ -276,7 +255,7 @@ HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})

### Join Command

The current approach for joining nodes is to use the `kubeadm` one; therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire join command in a variable.
The current approach for joining nodes is to use `kubeadm` and therefore, we will create a bootstrap token to perform the action. In order to facilitate the step, we will store the entire join command in a variable:

```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
@@ -287,6 +266,7 @@ JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME

A bash loop will be used to join all the available nodes.

```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  ssh ${USER}@${HOST} -t ${JOIN_CMD};
@@ -299,163 +279,42 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes

NAME                  STATUS     ROLES    AGE   VERSION
tenant-00-worker-00   NotReady   <none>   25s   v1.23.5
tenant-00-worker-01   NotReady   <none>   17s   v1.23.5
tenant-00-worker-02   NotReady   <none>   9s    v1.23.5
tenant-00-worker-00   NotReady   <none>   25s   v1.25.0
tenant-00-worker-01   NotReady   <none>   17s   v1.25.0
tenant-00-worker-02   NotReady   <none>   9s    v1.25.0
```

The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your choice.

Download the latest stable Calico manifest:

```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```

And after a while, `kube-system` pods will be running.
Before applying the Calico manifest, you can customize it as necessary according to your preferences.

Apply to the tenant cluster:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get pods -n kube-system

NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-8594699699-dlhbj   1/1     Running   0          3m
calico-node-kxf6n                          1/1     Running   0          3m
calico-node-qtdlw                          1/1     Running   0          3m
coredns-64897985d-2v5lc                    1/1     Running   0          5m
coredns-64897985d-nq276                    1/1     Running   0          5m
kube-proxy-cwdww                           1/1     Running   0          3m
kube-proxy-m48v4                           1/1     Running   0          3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```

And the nodes will be ready
And after a while, the nodes will be ready:

```bash
kubectl get nodes --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME                  STATUS   ROLES    AGE     VERSION
tenant-00-worker-00   Ready    <none>   2m48s   v1.23.5
tenant-00-worker-01   Ready    <none>   2m40s   v1.23.5
tenant-00-worker-02   Ready    <none>   2m32s   v1.23.5
```

## Smoke test

The tenant cluster is now ready to accept workloads.

Export its `kubeconfig` file:

```bash
export KUBECONFIG=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```

#### Deployment
Deploy a `nginx` application on the tenant cluster:

```bash
kubectl create deployment nginx --image=nginx
```

and check the `nginx` pod gets scheduled:

```bash
kubectl get pods -o wide

NAME                     READY   STATUS    RESTARTS   AGE   IP             NODE
nginx-6799fc88d8-4sgcb   1/1     Running   0          33s   172.12.121.1   worker02
```

#### Port Forwarding
Verify the ability to access applications remotely using port forwarding.

Retrieve the full name of the `nginx` pod:

```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```

Forward port 8080 on your local machine to port 80 of the `nginx` pod:

```bash
kubectl port-forward $POD_NAME 8080:80

Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```

In a new terminal make an HTTP request using the forwarding address:

```bash
curl --head http://127.0.0.1:8080

HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 08:19:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```

Switch back to the previous terminal and stop the port forwarding to the `nginx` pod.

#### Logs
Verify the ability to retrieve container logs.

Print the `nginx` pod logs:

```bash
kubectl logs $POD_NAME
...
127.0.0.1 - - [19/Jun/2021:08:19:01 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.68.0" "-"
```

#### Kubelet tunnel
Verify the ability to execute commands in a container.

Print the `nginx` version by executing the `nginx -v` command in the `nginx` container:

```bash
kubectl exec -ti $POD_NAME -- nginx -v
nginx version: nginx/1.21.0
```

#### Services
Verify the ability to expose applications using a service.

Expose the `nginx` deployment using a `NodePort` service:

```bash
kubectl expose deployment nginx --port 80 --type NodePort
```

Retrieve the node port assigned to the `nginx` service:

```bash
NODE_PORT=$(kubectl get svc nginx \
  --output=jsonpath='{range .spec.ports[0]}{.nodePort}')
```

Retrieve the IP address of a worker instance and make an HTTP request:

```bash
curl -I http://${WORKER0}:${NODE_PORT}

HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 09:29:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
tenant-00-worker-00   Ready    <none>   2m48s   v1.25.0
tenant-00-worker-01   Ready    <none>   2m40s   v1.25.0
tenant-00-worker-02   Ready    <none>   2m32s   v1.25.0
```

## Cleanup
Remove the worker nodes that joined the tenant control plane:

```bash
kubectl delete nodes --all --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
```

For each worker node, log in and clean it.
@@ -475,3 +334,5 @@ Delete the tenant control plane from kamaji
```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```

That's all folks!
9
docs/konnectivity.md
Normal file
@@ -0,0 +1,9 @@
# Set up Konnectivity service

In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.

This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes.

After worker nodes join the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker nodes traffic goes through these connections.

> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
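
For reference, the add-on is driven from the `TenantControlPlane` spec; a minimal fragment looks like the sketch below (the port value is illustrative, see the API reference for the full schema):

```yaml
addons:
  konnectivity:
    proxyPort: 31132   # mandatory
```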
5
docs/mysql-datastore.md
Normal file
@@ -0,0 +1,5 @@
# MySQL as Kubernetes Storage

Kamaji offers the possibility of having a different storage system than `ETCD` thanks to the [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [MySQL](https://www.mysql.com/).
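
A flavor of what the corresponding datastore definition can look like is sketched below; field names here are assumptions, and the authoritative example is `config/samples/kamaji_v1alpha1_datastore_mysql.yaml` in this repository.

```yaml
# Hypothetical fragment of a MySQL-backed DataStore.
spec:
  driver: MySQL
  endpoints:
    - mariadb.kamaji-system.svc.cluster.local:3306
```
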
> A detailed guide for production setup will be released soon. Please refer to the [Getting Started Guide](./getting-started-with-kamaji.md) for a demo setup with KinD.
6
docs/postgresql-datastore.md
Normal file
@@ -0,0 +1,6 @@
# PostgreSQL as Kubernetes Storage

Kamaji offers the possibility of having a different storage system than `etcd` thanks to the [kine](https://github.com/k3s-io/kine) integration.
One of the implementations is [PostgreSQL](https://www.postgresql.org/).

> A detailed guide for production setup will be released soon. Please refer to the [Getting Started Guide](./getting-started-with-kamaji.md) for a demo setup with KinD.
@@ -71,44 +71,13 @@ $ make yaml-installation-file

```

It will generate a yaml installation file at `config/install.yaml`. It should be customize accordingly.
It will generate a yaml installation file at `config/install.yaml`. It should be customized accordingly.

## Tenant Control Planes
## Custom Resource Definitions

**Kamaji** offers a [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing tenant control planes. This *CRD* is called `TenantControlPlane`, or `tcp` in short. Use the command `kubectl explain tcp.spec` to understand the fields and their usage.
**Kamaji** offers a set of [CRDs](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing tenant control planes:

### Add-ons
- `TenantControlPlane`, or `tcp` in short
- `DataStore`

**Kamaji** provides optional installations into the deployed tenant control plane through add-ons. It is possible to enable/disable them through the `tcp` definition.

### Core DNS

```yaml
addons:
  coreDNS: {}
```

### Kube-Proxy

```yaml
addons:
  kubeProxy: {}
```

### Konnectivity

```yaml
addons:
  konnectivity:
    proxyPort: 31132 # mandatory
    version: v0.0.32
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 100m
        memory: 128Mi
    serverImage: registry.k8s.io/kas-network-proxy/proxy-server
    agentImage: registry.k8s.io/kas-network-proxy/proxy-agent
```
For details, see [apireference](apireference.md).
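
With the CRDs installed, both schemas can also be inspected straight from the cluster (a quick sketch; short names may vary across versions):

```bash
kubectl explain tcp.spec
kubectl explain datastore.spec
```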
94
docs/templates/reference-cr.tmpl
vendored
Normal file
@@ -0,0 +1,94 @@
# API Reference

Packages:
{{range .Groups}}
- [{{.Group}}/{{.Version}}](#{{ anchorize (printf "%s/%s" .Group .Version) }})
{{- end -}}{{/* range .Groups */}}

{{- range .Groups }}
{{- $group := . }}

# {{.Group}}/{{.Version}}

Resource Types:
{{range .Kinds}}
- [{{.Name}}](#{{ anchorize .Name }})
{{end}}{{/* range .Kinds */}}

{{range .Kinds}}
{{$kind := .}}
## {{.Name}}

{{range .Types}}

{{if not .IsTopLevel}}
### {{.Name}}
{{end}}


{{.Description}}

<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody>
{{- if .IsTopLevel -}}
<tr>
<td><b>apiVersion</b></td>
<td>string</td>
<td>{{$group.Group}}/{{$group.Version}}</td>
<td>true</td>
</tr>
<tr>
<td><b>kind</b></td>
<td>string</td>
<td>{{$kind.Name}}</td>
<td>true</td>
</tr>
<tr>
<td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta">metadata</a></b></td>
<td>object</td>
<td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
<td>true</td>
</tr>
{{- end -}}
{{- range .Fields -}}
<tr>
<td><b>{{if .TypeKey}}<a href="#{{.TypeKey}}">{{.Name}}</a>{{else}}{{.Name}}{{end}}</b></td>
<td>{{.Type}}</td>
<td>
{{.Description}}<br/>
{{- if or .Schema.Format .Schema.Enum .Schema.Default .Schema.Minimum .Schema.Maximum }}
<br/>
{{- end}}
{{- if .Schema.Format }}
<i>Format</i>: {{ .Schema.Format }}<br/>
{{- end }}
{{- if .Schema.Enum }}
<i>Enum</i>: {{ .Schema.Enum | toStrings | join ", " }}<br/>
{{- end }}
{{- if .Schema.Default }}
<i>Default</i>: {{ .Schema.Default }}<br/>
{{- end }}
{{- if .Schema.Minimum }}
<i>Minimum</i>: {{ .Schema.Minimum }}<br/>
{{- end }}
{{- if .Schema.Maximum }}
<i>Maximum</i>: {{ .Schema.Maximum }}<br/>
{{- end }}
</td>
<td>{{.Required}}</td>
</tr>
{{- end -}}
</tbody>
</table>

{{- end}}{{/* range .Types */}}
{{- end}}{{/* range .Kinds */}}
{{- end}}{{/* range .Groups */}}
1
docs/upgrade.md
Normal file
@@ -0,0 +1 @@
# Tenant Cluster Upgrade
@@ -3,6 +3,7 @@ In Kamaji, there are different components that might require independent version

|Kamaji|Admin Cluster|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0+ |1.21.0 |1.23.x |0.0.32 |3.5.4 |
|0.0.1 |1.22.0+ |1.21.0 |1.23.5 |0.0.31 |3.5.4 |
|0.0.2 |1.22.0+ |1.21.0 |1.25.0 |0.0.32 |3.5.4 |

Other combinations might work but have not been tested.
Other combinations might work, but they have not yet been tested.