Compare commits

...

43 Commits

Author SHA1 Message Date
Dario Tranchitella
aada5c29a2 chore(helm): releasing helm v0.11.3 2023-02-24 09:57:46 +01:00
Dario Tranchitella
cb4a493e28 chore(helm): bumping up to v0.2.1 2023-02-24 09:56:39 +01:00
Dario Tranchitella
f783aff3c0 chore(kustomize): bumping up to v0.2.1 2023-02-24 09:56:39 +01:00
Dario Tranchitella
c8bdaf0aa2 chore(makefile): bumping up to v0.2.1 2023-02-24 09:56:39 +01:00
Dario Tranchitella
d1c2fe020e feat: upgrading to kubernetes v1.26.1 2023-02-24 09:56:23 +01:00
Dario Tranchitella
5b93d7181f fix: avoiding secrets regeneration upon velero restore 2023-02-23 19:01:47 +01:00
Dario Tranchitella
1273d95340 feat(helm): using tolerations for jobs 2023-02-22 14:19:24 +01:00
Filippo Pinton
1e4c78b646 fix(helm): remove duplicate labels 2023-02-21 15:25:20 +01:00
Pietro Terrizzi
903cfc0bae docs(helm): added pvc customAnnotations 2023-02-15 18:07:14 +01:00
Pietro Terrizzi
7bd142bcb2 feat(helm): added customAnnotations to PVC 2023-02-15 18:07:14 +01:00
Pietro Terrizzi
153a43e6f2 chore: k8s.gcr.io is deprecated in favor of registry.k8s.io 2023-02-15 18:06:26 +01:00
Dario Tranchitella
2abaeb5586 docs: keeping labels consistent 2023-02-13 11:24:36 +01:00
Dario Tranchitella
a8a41951cb refactor!: keeping labels consistent
The label kamaji.clastix.io/soot is deprecated in favour of
kamaji.clastix.io/name, every external resource referring to this must
be aligned prior to updating to this version.
2023-02-13 11:24:36 +01:00
Dario Tranchitella
a0485c338b refactor(checksum): using helper functions 2023-02-10 15:31:28 +01:00
mendrugory
89edc8bbf5 chore: no maintainer 2023-02-09 14:24:35 +01:00
Dario Tranchitella
43765769ec feat: v0.2.0 release 2023-02-06 22:34:33 +01:00
Dario Tranchitella
0016f121ed feat(helm): emptyDir with memory medium for flock performances 2023-02-06 22:12:50 +01:00
Dario Tranchitella
c3fb5373f6 fix(e2e): waiting for reconciliation of the TCP 2023-02-06 22:12:50 +01:00
Dario Tranchitella
670f10ad4e docs: documenting new flag max-concurrent-tcp-reconciles 2023-02-06 22:12:50 +01:00
Dario Tranchitella
4110b688c9 feat: configurable max concurrent tcp reconciles 2023-02-06 22:12:50 +01:00
Dario Tranchitella
830d86a38a feat: introducing enqueueback reconciliation status
Required for the changes introduced with 74f7157e8b
2023-02-06 22:12:50 +01:00
Dario Tranchitella
44d1f3fa7f refactor: updating local tcp instance to avoid 2nd retrieval 2023-02-06 22:12:50 +01:00
Dario Tranchitella
e23ae3c7f3 feat: automatically set gomaxprocs to match container cpu quota 2023-02-06 22:12:50 +01:00
bsctl
713b0754bb docs: update to latest features 2023-02-05 10:08:49 +01:00
Dario Tranchitella
da924b30ff docs: benchmarking kamaji on AWS 2023-02-05 09:09:02 +01:00
Dario Tranchitella
0f0d83130f chore(helm): ServiceMonitor support 2023-02-05 09:09:02 +01:00
Dario Tranchitella
634e808d2d chore(kustomize): ServiceMonitor support 2023-02-05 09:09:02 +01:00
bsctl
b99f224d32 fix(helm): handle basicAuth values for datastore 2023-02-05 09:07:20 +01:00
Dario Tranchitella
d02b5f427e test(e2e): kube-apiserver kubelet-preferred-address-types support 2023-01-22 14:56:47 +01:00
Dario Tranchitella
08b5bc05c3 docs: kube-apiserver kubelet-preferred-address-types support 2023-01-22 14:56:47 +01:00
Dario Tranchitella
4bd8e2d319 chore(helm): kube-apiserver kubelet-preferred-address-types support 2023-01-22 14:56:47 +01:00
Dario Tranchitella
a1f155fcab chore(kustomize): kube-apiserver kubelet-preferred-address-types support 2023-01-22 14:56:47 +01:00
Dario Tranchitella
743ea1343f feat(api): kube-apiserver kubelet-preferred-address-types support 2023-01-22 14:56:47 +01:00
Dario Tranchitella
41780bcb04 docs: tcp deployment strategy support 2023-01-17 10:01:21 +01:00
Dario Tranchitella
014297bb0f chore(helm): tcp deployment strategy support 2023-01-17 10:01:21 +01:00
Dario Tranchitella
20cfdd6931 chore(kustomize): tcp deployment strategy support 2023-01-17 10:01:21 +01:00
Dario Tranchitella
f03e250cf8 feat(api): deployment strategy support 2023-01-17 10:01:21 +01:00
Dario Tranchitella
2cdee08924 chore(helm): certificate authority rotation handling 2023-01-13 19:09:03 +01:00
Dario Tranchitella
6d27ca9e9e chore(kustomize): certificate authority rotation handling 2023-01-13 19:09:03 +01:00
Dario Tranchitella
2293e49e4b fix: certificate authority rotation handling 2023-01-13 19:09:03 +01:00
Dario Tranchitella
6b0f92baa3 docs: certificate authority rotation handling 2023-01-13 19:09:03 +01:00
Dario Tranchitella
8e94039962 feat(api)!: introducing ca rotating status 2023-01-13 19:09:03 +01:00
Dario Tranchitella
551df6df97 fix(kubeadm_phase): wrong string value representation 2023-01-13 19:09:03 +01:00
93 changed files with 1543 additions and 773 deletions

View File

@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.1.1
VERSION ?= 0.2.1
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")

View File

@@ -20,7 +20,7 @@ Global hyper-scalers are leading the Managed Kubernetes space, while other cloud
**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate.
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Planes of _“tenant clusters”_ are just regular pods instead of dedicated Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate.
![Architecture](docs/content/images/kamaji-light.png#gh-light-mode-only)
![Architecture](docs/content/images/kamaji-dark.png#gh-dark-mode-only)
@@ -40,7 +40,8 @@ Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-st
## Roadmap
- [ ] Benchmarking and stress-test
- [x] Benchmarking
- [ ] Stress-test
- [x] Support for dynamic address allocation on native Load Balancer
- [x] Zero Downtime Tenant Control Plane upgrade
- [x] `konnectivity` integration

View File

@@ -183,11 +183,12 @@ type KubernetesStatus struct {
Ingress *KubernetesIngressStatus `json:"ingress,omitempty"`
}
// +kubebuilder:validation:Enum=Provisioning;Upgrading;Migrating;Ready;NotReady
// +kubebuilder:validation:Enum=Provisioning;CertificateAuthorityRotating;Upgrading;Migrating;Ready;NotReady
type KubernetesVersionStatus string
var (
VersionProvisioning KubernetesVersionStatus = "Provisioning"
VersionCARotating KubernetesVersionStatus = "CertificateAuthorityRotating"
VersionUpgrading KubernetesVersionStatus = "Upgrading"
VersionMigrating KubernetesVersionStatus = "Migrating"
VersionReady KubernetesVersionStatus = "Ready"

View File

@@ -4,6 +4,7 @@
package v1alpha1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -32,7 +33,23 @@ type NetworkProfileSpec struct {
DNSServiceIPs []string `json:"dnsServiceIPs,omitempty"`
}
// +kubebuilder:validation:Enum=Hostname;InternalIP;ExternalIP;InternalDNS;ExternalDNS
type KubeletPreferredAddressType string
const (
NodeHostName KubeletPreferredAddressType = "Hostname"
NodeInternalIP KubeletPreferredAddressType = "InternalIP"
NodeExternalIP KubeletPreferredAddressType = "ExternalIP"
NodeInternalDNS KubeletPreferredAddressType = "InternalDNS"
NodeExternalDNS KubeletPreferredAddressType = "ExternalDNS"
)
type KubeletSpec struct {
// Ordered list of the preferred NodeAddressTypes to use for kubelet connections.
// Default to Hostname, InternalIP, ExternalIP.
// +kubebuilder:default={"Hostname","InternalIP","ExternalIP"}
// +kubebuilder:validation:MinItems=1
PreferredAddressTypes []KubeletPreferredAddressType `json:"preferredAddressTypes,omitempty"`
// CGroupFS defines the cgroup driver for Kubelet
// https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
CGroupFS CGroupDriver `json:"cgroupfs,omitempty"`
@@ -107,6 +124,10 @@ type DeploymentSpec struct {
// empty definition that uses the default runtime handler.
// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
RuntimeClassName string `json:"runtimeClassName,omitempty"`
// Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane.
// Default value is set to Rolling Update, with a blue/green strategy.
// +kubebuilder:default={type:"RollingUpdate",rollingUpdate:{maxUnavailable:0,maxSurge:"100%"}}
Strategy appsv1.DeploymentStrategy `json:"strategy,omitempty"`
// If specified, the Tenant Control Plane pod's tolerations.
// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

View File

@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -77,6 +78,10 @@ func (t *tenantControlPlaneValidator) ValidateCreate(_ context.Context, obj runt
return fmt.Errorf("unable to create a TenantControlPlane with a Kubernetes version greater than the supported one, actually %s", supportedVer.String())
}
if err = t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
return err
}
return nil
}
@@ -99,6 +104,9 @@ func (t *tenantControlPlaneValidator) ValidateUpdate(ctx context.Context, oldObj
if err := t.validateDataStore(ctx, old, tcp); err != nil {
return err
}
if err := t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
return err
}
return nil
}
@@ -107,6 +115,20 @@ func (t *tenantControlPlaneValidator) ValidateDelete(context.Context, runtime.Ob
return nil
}
// validatePreferredKubeletAddressTypes checks that each preferred kubelet
// address type is listed at most once, returning an error that names the
// first duplicated entry; a nil error means the list is duplicate-free.
func (t *tenantControlPlaneValidator) validatePreferredKubeletAddressTypes(addressTypes []KubeletPreferredAddressType) error {
	seen := sets.NewString()

	for _, addressType := range addressTypes {
		value := string(addressType)
		// Reject as soon as a previously seen entry re-appears.
		if seen.Has(value) {
			return fmt.Errorf("preferred kubelet address types is stated multiple times: %s", addressType)
		}
		seen.Insert(value)
	}

	return nil
}
func (t *tenantControlPlaneValidator) validateVersionUpdate(oldObj, newObj *TenantControlPlane) error {
oldVer, oldErr := semver.Make(t.normalizeKubernetesVersion(oldObj.Spec.Kubernetes.Version))
if oldErr != nil {

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v0.1.1
appVersion: v0.2.1
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
@@ -13,13 +13,11 @@ maintainers:
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
- email: iam@mendrugory.com
name: Gonzalo Gabriel Jiménez Fuentes
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.10.3
version: 0.11.3
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji

View File

@@ -1,6 +1,6 @@
# kamaji
![Version: 0.10.3](https://img.shields.io/badge/Version-0.10.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.1.1](https://img.shields.io/badge/AppVersion-v0.1.1-informational?style=flat-square)
![Version: 0.11.2](https://img.shields.io/badge/Version-0.11.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.2.1](https://img.shields.io/badge/AppVersion-v0.2.1-informational?style=flat-square)
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
@@ -11,7 +11,6 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
## Source Code
@@ -99,6 +98,7 @@ Here the values you can override:
| etcd.overrides.endpoints | object | `{"etcd-0":"etcd-0.etcd.kamaji-system.svc.cluster.local","etcd-1":"etcd-1.etcd.kamaji-system.svc.cluster.local","etcd-2":"etcd-2.etcd.kamaji-system.svc.cluster.local"}` | (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define the protocol (TLS is automatically inflected), or any port, inflected from .etcd.peerApiPort value. |
| etcd.peerApiPort | int | `2380` | The peer API port which servers are listening to. |
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| etcd.persistence.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
| etcd.persistence.size | string | `"10Gi"` | |
| etcd.persistence.storageClass | string | `""` | |
| etcd.port | int | `2379` | The client request port. |
@@ -128,6 +128,7 @@ Here the values you can override:
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `"kamaji-controller-manager"` | |
| serviceMonitor.enabled | bool | `false` | Toggle the ServiceMonitor true if you have Prometheus Operator installed and configured |
| temporaryDirectoryPath | string | `"/tmp/kamaji"` | Directory which will be used to work with temporary files. (default "/tmp/kamaji") |
| tolerations | list | `[]` | Kubernetes node taints that the Kamaji controller pods would tolerate |

View File

@@ -756,6 +756,34 @@ spec:
runtimeClassName:
description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run the Tenant Control Plane pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class'
type: string
strategy:
default:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 0
type: RollingUpdate
description: Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane. Default value is set to Rolling Update, with a blue/green strategy.
properties:
rollingUpdate:
description: 'Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. --- TODO: Update this to follow our convention for oneOf, whatever we decide it to be.'
properties:
maxSurge:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.'
x-kubernetes-int-or-string: true
maxUnavailable:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.'
x-kubernetes-int-or-string: true
type: object
type:
description: Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
type: string
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
@@ -972,6 +1000,22 @@ spec:
- systemd
- cgroupfs
type: string
preferredAddressTypes:
default:
- Hostname
- InternalIP
- ExternalIP
description: Ordered list of the preferred NodeAddressTypes to use for kubelet connections. Default to Hostname, InternalIP, ExternalIP.
items:
enum:
- Hostname
- InternalIP
- ExternalIP
- InternalDNS
- ExternalDNS
type: string
minItems: 1
type: array
type: object
version:
description: Kubernetes Version for the tenant control plane
@@ -1634,6 +1678,7 @@ spec:
description: Status returns the current status of the Kubernetes version, such as its provisioning state, or completed upgrade.
enum:
- Provisioning
- CertificateAuthorityRotating
- Upgrading
- Migrating
- Ready

View File

@@ -46,9 +46,9 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
Selector labels
*/}}
{{- define "kamaji.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kamaji.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: controller-manager
app.kubernetes.io/name: {{ default (include "kamaji.name" .) .name }}
app.kubernetes.io/instance: {{ default .Release.Name .instance }}
app.kubernetes.io/component: {{ default "controller-manager" .component }}
{{- end }}
{{/*
@@ -69,6 +69,13 @@ Create the name of the Service to use for webhooks
{{- printf "%s-webhook-service" (include "kamaji.fullname" .) }}
{{- end }}
{{/*
Create the name of the Service to use for metrics
*/}}
{{- define "kamaji.metricsServiceName" -}}
{{- printf "%s-metrics-service" (include "kamaji.fullname" .) }}
{{- end }}
{{/*
Create the name of the cert-manager secret
*/}}

View File

@@ -2,8 +2,8 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
labels:
{{- include "kamaji.labels" . | nindent 4 }}
app.kubernetes.io/component: certificate
{{- $data := . | mustMergeOverwrite (dict "component" "certificate") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: {{ include "kamaji.certificateName" . }}
namespace: {{ .Release.Namespace }}
spec:

View File

@@ -2,8 +2,8 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
labels:
{{- include "kamaji.labels" . | nindent 4 }}
app.kubernetes.io/component: issuer
{{- $data := . | mustMergeOverwrite (dict "component" "issuer") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: kamaji-selfsigned-issuer
namespace: {{ .Release.Namespace }}
spec:

View File

@@ -62,6 +62,9 @@ spec:
- containerPort: 9443
name: webhook-server
protocol: TCP
- containerPort: 8080
name: metrics
protocol: TCP
- containerPort: 8081
name: healthcheck
protocol: TCP
@@ -74,11 +77,16 @@ spec:
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: tmp
emptyDir:
medium: Memory
- name: cert
secret:
defaultMode: 420

View File

@@ -12,7 +12,12 @@ spec:
{{- include "datastore.endpoints" . | indent 4 }}
{{- if (and .Values.datastore.basicAuth.usernameSecret.name .Values.datastore.basicAuth.passwordSecret.name) }}
basicAuth:
{{- .Values.datastore.basicAuth | toYaml | nindent 4 }}
username:
secretReference:
{{- .Values.datastore.basicAuth.usernameSecret | toYaml | nindent 8 }}
password:
secretReference:
{{- .Values.datastore.basicAuth.passwordSecret | toYaml | nindent 8 }}
{{- end }}
tlsConfig:
certificateAuthority:

View File

@@ -28,4 +28,8 @@ spec:
- --ignore-not-found=true
- {{ include "etcd.caSecretName" . }}
- {{ include "etcd.clientSecretName" . }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -63,4 +63,8 @@ spec:
- name: certs
secret:
secretName: {{ include "etcd.caSecretName" . }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -57,4 +57,8 @@ spec:
name: {{ include "etcd.csrConfigMapName" . }}
- name: certs
emptyDir: {}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -81,6 +81,10 @@ spec:
volumeClaimTemplates:
- metadata:
name: data
{{- with .Values.etcd.persistence.customAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
storageClassName: {{ .Values.etcd.persistence.storageClassName }}
accessModes:

View File

@@ -4,8 +4,8 @@ metadata:
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "kamaji.certificateName" . }}
labels:
{{- include "kamaji.labels" . | nindent 4 }}
app.kubernetes.io/instance: mutating-webhook-configuration
{{- $data := . | mustMergeOverwrite (dict "instance" "mutating-webhook-configuration") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: kamaji-mutating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
{{- $data := . | mustMergeOverwrite (dict "component" "metrics") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: {{ include "kamaji.metricsServiceName" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 8080
name: metrics
protocol: TCP
targetPort: metrics
selector:
{{- include "kamaji.selectorLabels" . | nindent 4 }}

View File

@@ -2,15 +2,15 @@ apiVersion: v1
kind: Service
metadata:
labels:
{{- include "kamaji.labels" . | nindent 4 }}
app.kubernetes.io/component: webhook
app.kubernetes.io/instance: webhook-service
{{- $data := . | mustMergeOverwrite (dict "component" "webhook" "instance" "webhook-service") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: {{ include "kamaji.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 443
protocol: TCP
name: webhook-server
targetPort: webhook-server
selector:
{{- include "kamaji.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,21 @@
{{- if and (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- $data := . | mustMergeOverwrite (dict "component" "servicemonitor") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: {{ include "kamaji.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- path: /metrics
port: metrics
scheme: http
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "kamaji.name" . }}
{{- end }}

View File

@@ -4,8 +4,8 @@ metadata:
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "kamaji.certificateName" . }}
labels:
{{- include "kamaji.labels" . | nindent 4 }}
app.kubernetes.io/instance: validating-webhook-configuration
{{- $data := . | mustMergeOverwrite (dict "instance" "validating-webhook-configuration") -}}
{{- include "kamaji.labels" $data | nindent 4 }}
name: kamaji-validating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@@ -15,6 +15,11 @@ image:
# -- A list of extra arguments to add to the kamaji controller default ones
extraArgs: []
serviceMonitor:
# -- Toggle the ServiceMonitor true if you have Prometheus Operator installed and configured
enabled: false
etcd:
# -- Install an etcd with enabled multi-tenancy along with Kamaji
deploy: true
@@ -52,6 +57,9 @@ etcd:
storageClass: ""
accessModes:
- ReadWriteOnce
# -- The custom annotations to add to the PVC
customAnnotations: {}
# volumeType: local
overrides:
caSecret:

View File

@@ -41,6 +41,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
managerServiceName string
webhookCABundle []byte
migrateJobImage string
maxConcurrentReconciles int
webhookCAPath string
)
@@ -111,11 +112,12 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
KineContainerImage: kineImage,
TmpBaseDirectory: tmpDirectory,
},
TriggerChan: tcpChannel,
KamajiNamespace: managerNamespace,
KamajiServiceAccount: managerServiceAccountName,
KamajiService: managerServiceName,
KamajiMigrateImage: migrateJobImage,
TriggerChan: tcpChannel,
KamajiNamespace: managerNamespace,
KamajiServiceAccount: managerServiceAccountName,
KamajiService: managerServiceName,
KamajiMigrateImage: migrateJobImage,
MaxConcurrentReconciles: maxConcurrentReconciles,
}
if err = reconciler.SetupWithManager(mgr); err != nil {
@@ -201,6 +203,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
cmd.Flags().StringVar(&kineImage, "kine-image", "rancher/kine:v0.9.2-amd64", "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies).")
cmd.Flags().StringVar(&datastore, "datastore", "etcd", "The default DataStore that should be used by Kamaji to setup the required storage.")
cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:v%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
cmd.Flags().IntVar(&maxConcurrentReconciles, "max-concurrent-tcp-reconciles", 1, "Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption)")
cmd.Flags().StringVar(&managerNamespace, "pod-namespace", os.Getenv("POD_NAMESPACE"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
cmd.Flags().StringVar(&managerServiceName, "webhook-service-name", "kamaji-webhook-service", "The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs.")
cmd.Flags().StringVar(&managerServiceAccountName, "serviceaccount-name", os.Getenv("SERVICE_ACCOUNT"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/spf13/cobra"
_ "go.uber.org/automaxprocs" // Automatically set `GOMAXPROCS` to match Linux container CPU quota.
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"

View File

@@ -1244,6 +1244,65 @@ spec:
class with an empty definition that uses the default runtime
handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class'
type: string
strategy:
default:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 0
type: RollingUpdate
description: Strategy describes how to replace existing pods
with new ones for the given Tenant Control Plane. Default
value is set to Rolling Update, with a blue/green strategy.
properties:
rollingUpdate:
description: 'Rolling update config params. Present only
if DeploymentStrategyType = RollingUpdate. --- TODO:
Update this to follow our convention for oneOf, whatever
we decide it to be.'
properties:
maxSurge:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can
be scheduled above the desired number of pods. Value
can be an absolute number (ex: 5) or a percentage
of desired pods (ex: 10%). This can not be 0 if
MaxUnavailable is 0. Absolute number is calculated
from percentage by rounding up. Defaults to 25%.
Example: when this is set to 30%, the new ReplicaSet
can be scaled up immediately when the rolling update
starts, such that the total number of old and new
pods do not exceed 130% of desired pods. Once old
pods have been killed, new ReplicaSet can be scaled
up further, ensuring that total number of pods running
at any time during the update is at most 130% of
desired pods.'
x-kubernetes-int-or-string: true
maxUnavailable:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can
be unavailable during the update. Value can be an
absolute number (ex: 5) or a percentage of desired
pods (ex: 10%). Absolute number is calculated from
percentage by rounding down. This can not be 0 if
MaxSurge is 0. Defaults to 25%. Example: when this
is set to 30%, the old ReplicaSet can be scaled
down to 70% of desired pods immediately when the
rolling update starts. Once new pods are ready,
old ReplicaSet can be scaled down further, followed
by scaling up the new ReplicaSet, ensuring that
the total number of pods available at all times
during the update is at least 70% of desired pods.'
x-kubernetes-int-or-string: true
type: object
type:
description: Type of deployment. Can be "Recreate" or
"RollingUpdate". Default is RollingUpdate.
type: string
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s
tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
@@ -1617,6 +1676,24 @@ spec:
- systemd
- cgroupfs
type: string
preferredAddressTypes:
default:
- Hostname
- InternalIP
- ExternalIP
description: Ordered list of the preferred NodeAddressTypes
to use for kubelet connections. Default to Hostname, InternalIP,
ExternalIP.
items:
enum:
- Hostname
- InternalIP
- ExternalIP
- InternalDNS
- ExternalDNS
type: string
minItems: 1
type: array
type: object
version:
description: Kubernetes Version for the tenant control plane
@@ -2461,6 +2538,7 @@ spec:
version, such as its provisioning state, or completed upgrade.
enum:
- Provisioning
- CertificateAuthorityRotating
- Upgrading
- Migrating
- Ready

View File

@@ -19,8 +19,7 @@ bases:
- ../samples
- ../webhook
- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
- ../prometheus
patchesStrategicMerge:
# Mount the controller config file for loading manager configurations

View File

@@ -1001,6 +1001,34 @@ spec:
runtimeClassName:
description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run the Tenant Control Plane pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class'
type: string
strategy:
default:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 0
type: RollingUpdate
description: Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane. Default value is set to Rolling Update, with a blue/green strategy.
properties:
rollingUpdate:
description: 'Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. --- TODO: Update this to follow our convention for oneOf, whatever we decide it to be.'
properties:
maxSurge:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.'
x-kubernetes-int-or-string: true
maxUnavailable:
anyOf:
- type: integer
- type: string
description: 'The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.'
x-kubernetes-int-or-string: true
type: object
type:
description: Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
type: string
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
@@ -1217,6 +1245,22 @@ spec:
- systemd
- cgroupfs
type: string
preferredAddressTypes:
default:
- Hostname
- InternalIP
- ExternalIP
description: Ordered list of the preferred NodeAddressTypes to use for kubelet connections. Default to Hostname, InternalIP, ExternalIP.
items:
enum:
- Hostname
- InternalIP
- ExternalIP
- InternalDNS
- ExternalDNS
type: string
minItems: 1
type: array
type: object
version:
description: Kubernetes Version for the tenant control plane
@@ -1879,6 +1923,7 @@ spec:
description: Status returns the current status of the Kubernetes version, such as its provisioning state, or completed upgrade.
enum:
- Provisioning
- CertificateAuthorityRotating
- Upgrading
- Migrating
- Ready
@@ -2259,7 +2304,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
image: clastix/kamaji:v0.1.1
image: clastix/kamaji:v0.2.1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2272,6 +2317,9 @@ spec:
- containerPort: 9443
name: webhook-server
protocol: TCP
- containerPort: 8080
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
@@ -2373,6 +2421,25 @@ spec:
name: root-client-certs
namespace: kamaji-system
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
name: kamaji-controller-manager-metrics-monitor
namespace: kamaji-system
spec:
endpoints:
- path: /metrics
port: metrics
scheme: http
namespaceSelector:
matchNames:
- kamaji-system
selector:
matchLabels:
control-plane: controller-manager
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:

View File

@@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: clastix/kamaji
newTag: v0.1.1
newTag: v0.2.1

View File

@@ -42,6 +42,10 @@ spec:
image: controller:latest
imagePullPolicy: Always
name: manager
ports:
- containerPort: 8080
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
livenessProbe:

View File

@@ -1,2 +1,5 @@
resources:
- monitor.yaml
configurations:
- kustomizeconfig.yaml

View File

@@ -0,0 +1,4 @@
varReference:
- kind: ServiceMonitor
group: monitoring.coreos.com
path: spec/namespaceSelector/matchNames

View File

@@ -1,4 +1,3 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
@@ -10,11 +9,11 @@ metadata:
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
port: metrics
scheme: http
namespaceSelector:
matchNames:
- $(SERVICE_NAMESPACE)
selector:
matchLabels:
control-plane: controller-manager

View File

@@ -60,7 +60,7 @@ func (c *CoreDNS) Reconcile(ctx context.Context, request reconcile.Request) (rec
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, c.AdminClient, c.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, c.AdminClient, tcp, resource); err != nil {
c.logger.Error(err, "update status failed", "resource", resource.GetName())
return reconcile.Result{}, err

View File

@@ -60,7 +60,7 @@ func (k *KonnectivityAgent) Reconcile(ctx context.Context, _ reconcile.Request)
continue
}
if err = utils.UpdateStatus(ctx, k.AdminClient, k.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
k.logger.Error(err, "update status failed", "resource", resource.GetName())
return reconcile.Result{}, err

View File

@@ -50,7 +50,7 @@ func (k *KubeadmPhase) Reconcile(ctx context.Context, _ reconcile.Request) (reco
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, k.Phase.GetClient(), k.GetTenantControlPlaneFunc, k.Phase); err != nil {
if err = utils.UpdateStatus(ctx, k.Phase.GetClient(), tcp, k.Phase); err != nil {
k.logger.Error(err, "update status failed")
return reconcile.Result{}, err

View File

@@ -60,7 +60,7 @@ func (k *KubeProxy) Reconcile(ctx context.Context, _ reconcile.Request) (reconci
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, k.AdminClient, k.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
k.logger.Error(err, "update status failed")
return reconcile.Result{}, err

View File

@@ -123,22 +123,27 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
// the soot manager if this is already registered.
v, ok := m.sootMap[request.String()]
if ok {
// The TenantControlPlane is in non-ready mode, or marked for deletion:
// we don't want to pollute with messages due to broken connection.
// Once the TCP will be ready again, the event will be intercepted and the manager started back.
if tcpStatus == kamajiv1alpha1.VersionNotReady {
switch {
case tcpStatus == kamajiv1alpha1.VersionCARotating:
// The TenantControlPlane CA has been rotated, it means the running manager
// must be restarted to avoid certificate signed by unknown authority errors.
return reconcile.Result{}, m.cleanup(ctx, request, tcp)
}
for _, trigger := range v.triggers {
trigger <- event.GenericEvent{Object: tcp}
case tcpStatus == kamajiv1alpha1.VersionNotReady:
// The TenantControlPlane is in non-ready mode, or marked for deletion:
// we don't want to pollute with messages due to broken connection.
// Once the TCP will be ready again, the event will be intercepted and the manager started back.
return reconcile.Result{}, m.cleanup(ctx, request, tcp)
default:
for _, trigger := range v.triggers {
trigger <- event.GenericEvent{Object: tcp}
}
}
return reconcile.Result{}, nil
}
// No need to start a soot manager if the TenantControlPlane is not ready:
// enqueuing back is not required since we're going to get that event once ready.
if tcpStatus == kamajiv1alpha1.VersionNotReady {
if tcpStatus == kamajiv1alpha1.VersionNotReady || tcpStatus == kamajiv1alpha1.VersionCARotating {
log.FromContext(ctx).Info("skipping start of the soot manager for a not ready instance")
return reconcile.Result{}, nil

View File

@@ -6,18 +6,23 @@ package controllers
import (
"context"
"fmt"
"strings"
"time"
"github.com/juju/mutex/v2"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
apimachineryerrors "k8s.io/apimachinery/pkg/api/errors"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -36,14 +41,17 @@ import (
// TenantControlPlaneReconciler reconciles a TenantControlPlane object.
type TenantControlPlaneReconciler struct {
Client client.Client
APIReader client.Reader
Config TenantControlPlaneReconcilerConfig
TriggerChan TenantControlPlaneChannel
KamajiNamespace string
KamajiServiceAccount string
KamajiService string
KamajiMigrateImage string
Client client.Client
APIReader client.Reader
Config TenantControlPlaneReconcilerConfig
TriggerChan TenantControlPlaneChannel
KamajiNamespace string
KamajiServiceAccount string
KamajiService string
KamajiMigrateImage string
MaxConcurrentReconciles int
clock mutex.Clock
}
// TenantControlPlaneReconcilerConfig gives the necessary configuration for TenantControlPlaneReconciler.
@@ -68,7 +76,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
tenantControlPlane, err := r.getTenantControlPlane(ctx, req.NamespacedName)()
if err != nil {
if errors2.IsNotFound(err) {
if apimachineryerrors.IsNotFound(err) {
log.Info("resource may have been deleted, skipping")
return ctrl.Result{}, nil
@@ -79,6 +87,25 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, err
}
releaser, err := mutex.Acquire(r.mutexSpec(tenantControlPlane))
if err != nil {
switch {
case errors.As(err, &mutex.ErrTimeout):
log.Info("acquire timed out, current process is blocked by another reconciliation")
return ctrl.Result{Requeue: true}, nil
case errors.As(err, &mutex.ErrCancelled):
log.Info("acquire cancelled")
return ctrl.Result{Requeue: true}, nil
default:
log.Error(err, "acquire failed")
return ctrl.Result{}, err
}
}
defer releaser.Release()
markedToBeDeleted := tenantControlPlane.GetDeletionTimestamp() != nil
if markedToBeDeleted && !controllerutil.ContainsFinalizer(tenantControlPlane, finalizers.DatastoreFinalizer) {
@@ -156,7 +183,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
continue
}
if err = utils.UpdateStatus(ctx, r.Client, r.getTenantControlPlane(ctx, req.NamespacedName), resource); err != nil {
if err = utils.UpdateStatus(ctx, r.Client, tenantControlPlane, resource); err != nil {
log.Error(err, "update of the resource failed", "resource", resource.GetName())
return ctrl.Result{}, err
@@ -164,7 +191,11 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
log.Info(fmt.Sprintf("%s has been configured", resource.GetName()))
return ctrl.Result{}, nil
if result == resources.OperationResultEnqueueBack {
log.Info("requested enqueuing back", "resources", resource.GetName())
return ctrl.Result{Requeue: true}, nil
}
}
log.Info(fmt.Sprintf("%s has been reconciled", tenantControlPlane.GetName()))
@@ -172,8 +203,20 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, nil
}
// mutexSpec builds the juju/mutex specification used to serialize the
// reconciliation of a single TenantControlPlane. The lock name is derived
// from the object's UID with dashes stripped (prefixed with "kamaji"),
// so concurrent workers reconciling the same object block each other
// while distinct objects proceed independently. Acquisition retries
// every 10ms and gives up after one second; Cancel is nil, meaning the
// acquire cannot be aborted externally.
func (r *TenantControlPlaneReconciler) mutexSpec(obj client.Object) mutex.Spec {
return mutex.Spec{
Name: strings.ReplaceAll(fmt.Sprintf("kamaji%s", obj.GetUID()), "-", ""),
Clock: r.clock,
Delay: 10 * time.Millisecond,
Timeout: time.Second,
Cancel: nil,
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.clock = clock.RealClock{}
return ctrl.NewControllerManagedBy(mgr).
Watches(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
limitingInterface.AddRateLimited(ctrl.Request{
@@ -217,6 +260,9 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
return ok && v == "migrate"
}))).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
}).
Complete(r)
}

View File

@@ -7,24 +7,27 @@ import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/resources"
)
func UpdateStatus(ctx context.Context, client client.Client, tcpRetrieval TenantControlPlaneRetrievalFn, resource resources.Resource) error {
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
tenantControlPlane, err := tcpRetrieval()
if err != nil {
return err
}
func UpdateStatus(ctx context.Context, client client.Client, tcp *kamajiv1alpha1.TenantControlPlane, resource resources.Resource) error {
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
defer func() {
if err != nil {
_ = client.Get(ctx, types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, tcp)
}
}()
if err = resource.UpdateTenantControlPlaneStatus(ctx, tenantControlPlane); err != nil {
if err = resource.UpdateTenantControlPlaneStatus(ctx, tcp); err != nil {
return fmt.Errorf("error applying TenantcontrolPlane status: %w", err)
}
if err = client.Status().Update(ctx, tenantControlPlane); err != nil {
if err = client.Status().Update(ctx, tcp); err != nil {
return fmt.Errorf("error updating tenantControlPlane status: %w", err)
}

View File

@@ -15,7 +15,7 @@ export KAMAJI_NAMESPACE=kamaji-system
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
export TENANT_VERSION=v1.25.0
export TENANT_VERSION=v1.26.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16

View File

@@ -5,7 +5,7 @@ export KAMAJI_NAMESPACE=kamaji-system
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=clastix.labs
export TENANT_VERSION=v1.25.0
export TENANT_VERSION=v1.26.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16

View File

@@ -11,13 +11,13 @@ prometheus-stack:
helm repo update
helm install prometheus-stack --create-namespace -n monitoring prometheus-community/kube-prometheus-stack
reqs: kind ingress-nginx etcd-cluster cert-manager
reqs: kind ingress-nginx cert-manager
cert-manager:
@kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml
kamaji: reqs
@kubectl apply -f $(kind_path)/../../config/install.yaml
helm install kamaji --create-namespace -n kamaji-system $(kind_path)/../../charts/kamaji
destroy: kind/destroy etcd-certificates/cleanup

View File

@@ -7,7 +7,7 @@ export DOCKER_IMAGE_NAME="kindest/node"
export DOCKER_NETWORK="kind"
# Variables
export KUBERNETES_VERSION=${1:-v1.23.5}
export KUBERNETES_VERSION=${1:-v1.23.4}
export KUBECONFIG="${KUBECONFIG:-/tmp/kubeconfig}"
if [ -z $2 ]

View File

@@ -11,9 +11,9 @@ These are requirements of the design behind Kamaji:
Goals and scope may vary as the project evolves.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Kamaji is special because the Control Planes of the _“tenant cluster”_ are regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on the metrics is available. A Service is used to espose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on the metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used, `NodePort` and `ClusterIP` are other viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach of managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` in short.
@@ -25,9 +25,7 @@ And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. reg
We have in roadmap, the Cluster APIs support as well as a Terraform provider so that you can create _“tenant clusters”_ in a declarative way.
## Datastores
Putting the Tenant Control Plane in a pod is the easiest part. Also, we have to make sure each tenant cluster saves the state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative keeping in mind our goal for a resilient and cost-optimized solution at the same time.
As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we can deploy a multi-tenant `etcd` datastore to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing Tenant datastores. With this solution, the resiliency is guaranteed by the usual `etcd` mechanism, and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that we have to operate an external datastore, in addition to `etcd` of the _“admin cluster”_ and manage the access to be sure that each _“tenant cluster”_ uses only its data.
Putting the Tenant Control Plane in a pod is the easiest part. Also, we have to make sure each tenant cluster saves the state to be able to store and retrieve data. As we can deploy a Kubernetes cluster with an external `etcd` cluster, we explored this option for the Tenant Control Planes. On the admin cluster, you can deploy one or multi-tenant `etcd` to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach of managing multiple datastores. By sharing the datastore between multiple tenants, the resiliency is still guaranteed and the pods' count remains under control, so it solves the main goal of resiliency and costs optimization. The trade-off here is that you have to operate external datastores, in addition to `etcd` of the _“admin cluster”_ and manage the access to be sure that each _“tenant cluster”_ uses only its data.
### Other storage drivers
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as datastore for _“tenant clusters”_.
@@ -35,6 +33,11 @@ Kamaji offers the option of using a more capable datastore than `etcd` to save t
### Pooling
By default, Kamaji is expecting to persist all the _“tenant clusters”_ data in a unique datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that could have different resources assigned or a different tiering. Pooling of multiple datastore is an option you can leverage for a very large set of _“tenant clusters”_ so you can distribute the load properly. As future improvements, we have a _datastore scheduler_ feature in roadmap so that Kamaji itself can assign automatically a _“tenant cluster”_ to the best datastore in the pool.
### Migration
In order to simplify Day 2 operations and reduce the operational burden, Kamaji provides the capability to live migrate data from one datastore to another of the same driver, without manual and error-prone backup and restore operations.
> Currently, live data migration is only available between datastores having the same driver.
## Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.

View File

@@ -8,15 +8,15 @@ We assume you have installed on your workstation:
- [Docker](https://docker.com)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [Helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl/cfssljson](https://github.com/cloudflare/cfssl)
> Starting from Kamaji v0.0.2, `kubectl` and `kubeadm` need to meet at least minimum version to `v1.25.0`:
> this is required due to the latest changes introduced in the Kubernetes 1.25 release regarding the `kubelet-config` ConfigMap required for the node join.
> Starting from Kamaji v0.1.0, `kubectl` and `kubeadm` need to meet at least minimum version to `v1.25.0` due to the changes regarding the `kubelet-config` ConfigMap required for the node join.
## Setup Kamaji on KinD
@@ -26,81 +26,65 @@ The instance of Kamaji is made of a single node hosting:
- admin worker
- multi-tenant datastore
### Standard installation
### Standard Installation
You can install your KinD cluster, ETCD multi-tenant cluster and Kamaji operator with a **single command**:
You can install your KinD cluster, an `etcd` based multi-tenant datastore and the Kamaji operator with a **single command**:
```bash
$ make -C deploy/kind
```
Now you can [create your first `TenantControlPlane`](#deploy-tenant-control-plane).
Now you can deploy a [`TenantControlPlane`](#deploy-tenant-control-plane).
### Data store-specific
### Installation with alternative datastore drivers
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like `MySQL` or `PostgreSQL` compatible databases.
Kamaji offers the possibility of using a different storage system than `etcd` for datastore, like `MySQL` or `PostgreSQL` compatible databases.
First, setup a KinD cluster:
First, setup a KinD cluster and the other requirements:
```bash
$ make -C deploy/kind kind
$ make -C deploy/kind reqs
```
#### ETCD
Install one of the alternative supported databases:
Deploy a multi-tenant `ETCD` cluster into the Kamaji node:
- **MySQL** install it with command:
`$ make -C deploy/kine/mysql mariadb`
- **PostgreSQL** install it with command:
`$ make -C deploy/kine/postgresql postgresql`
Then use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
For example, with a PostgreSQL datastore:
```bash
$ make -C deploy/kind etcd-cluster
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
--set etcd.deploy=false \
--set datastore.driver=PostgreSQL \
--set datastore.endpoints[0]=postgres-default-rw.kamaji-system.svc:5432 \
--set datastore.basicAuth.usernameSecret.name=postgres-default-superuser \
--set datastore.basicAuth.usernameSecret.namespace=kamaji-system \
--set datastore.basicAuth.usernameSecret.keyPath=username \
--set datastore.basicAuth.passwordSecret.name=postgres-default-superuser \
--set datastore.basicAuth.passwordSecret.namespace=kamaji-system \
--set datastore.basicAuth.passwordSecret.keyPath=password \
--set datastore.tlsConfig.certificateAuthority.certificate.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.certificate.keyPath=ca.crt \
--set datastore.tlsConfig.certificateAuthority.privateKey.name=postgres-default-ca \
--set datastore.tlsConfig.certificateAuthority.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.certificateAuthority.privateKey.keyPath=ca.key \
--set datastore.tlsConfig.clientCertificate.certificate.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.certificate.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.certificate.keyPath=tls.crt \
--set datastore.tlsConfig.clientCertificate.privateKey.name=postgres-default-root-cert \
--set datastore.tlsConfig.clientCertificate.privateKey.namespace=kamaji-system \
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
```
Now you're ready to [install Kamaji operator](#install-kamaji).
#### MySQL
Deploy a MySQL/MariaDB backend into the Kamaji node:
```bash
$ make -C deploy/kine/mysql mariadb
```
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a MySQL DataStore `config/samples/kamaji_v1alpha1_datastore_mysql.yaml` and make sure Kamaji uses the proper datastore name:
```
--datastore={.metadata.name}
```
Now you're ready to [install Kamaji operator](#install-kamaji).
#### PostgreSQL
Deploy a PostgreSQL backend into the Kamaji node:
```bash
$ make -C deploy/kine/postgresql postgresql
```
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a PostgreSQL DataStore `config/samples/kamaji_v1alpha1_datastore_postgresql.yaml` and make sure Kamaji uses the proper datastore name:
```
--datastore={.metadata.name}
```
Now you're ready to [install Kamaji operator](#install-kamaji).
### Install Kamaji
Kamaji takes advantage of the [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations.
These webhooks are secured by a TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
```bash
$ kubectl apply -f config/install.yaml
```
> Please note that this single YAML manifest is missing some required automations.
> The preferred way to install Kamaji is using its Helm Chart.
> Please, refer to the section [**Setup Kamaji on a generic infrastructure**.](/guides/kamaji-deployment-guide#install-kamaji-controller)
### Deploy Tenant Control Plane
Now it is the moment of deploying your first tenant control plane.
@@ -156,7 +140,7 @@ EOF
> Check networkProfile fields according to your installation
> To let Kamaji work in KinD, you have to indicate that the service must be [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
### Get Kubeconfig
### Get the kubeconfig
Let's retrieve kubeconfig and store in `/tmp/kubeconfig`

View File

@@ -0,0 +1,171 @@
# Datastore Migration
On the admin cluster, you can deploy one or more multi-tenant datastores such as `etcd`, `PostgreSQL`, and `MySQL` to save the state of the tenant clusters. A Tenant Control Plane can be migrated from one datastore to another without service disruption and without complex, error-prone backup & restore procedures.
This guide will assist you to live migrate Tenant's data from a datastore to another one having the same `etcd` driver.
## Prerequisites
Assume you have a Tenant Control Plane using the default datastore:
``` shell
kubectl get tcp
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
tenant-00 v1.25.2 Ready 192.168.32.200:6443 tenant-00-admin-kubeconfig default 8d
```
You can check a custom resource called `DataStore` providing a declarative description of the `default` datastore:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
annotations:
labels:
name: default
spec:
driver: etcd
endpoints:
- etcd-0.etcd.kamaji-system.svc.cluster.local:2379
- etcd-1.etcd.kamaji-system.svc.cluster.local:2379
- etcd-2.etcd.kamaji-system.svc.cluster.local:2379
tlsConfig:
certificateAuthority:
certificate:
secretReference:
keyPath: ca.crt
name: etcd-certs
namespace: kamaji-system
privateKey:
secretReference:
keyPath: ca.key
name: etcd-certs
namespace: kamaji-system
clientCertificate:
certificate:
secretReference:
keyPath: tls.crt
name: etcd-root-client-certs
namespace: kamaji-system
privateKey:
secretReference:
keyPath: tls.key
name: etcd-root-client-certs
namespace: kamaji-system
status:
usedBy:
- default/tenant-00
```
The `default` datastore is installed by Kamaji Helm chart in the same namespace hosting the controller:
```shell
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 23d
etcd-1 1/1 Running 0 23d
etcd-2 1/1 Running 0 23d
kamaji-5d6cdfbbb9-bn27f 1/1 Running 0 2d19h
```
## Install a new datastore
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` running as a StatefulSet made of three replicas:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install dedicated clastix/kamaji-etcd -n dedicated --create-namespace --set datastore.enabled=true
```
You should end up with a new datastore `dedicated` provided by an `etcd` cluster:
```yaml
kubectl get datastore dedicated -o yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
annotations:
labels:
name: dedicated
spec:
driver: etcd
endpoints:
- dedicated-0.dedicated.dedicated.svc.cluster.local:2379
- dedicated-1.dedicated.dedicated.svc.cluster.local:2379
- dedicated-2.dedicated.dedicated.svc.cluster.local:2379
tlsConfig:
certificateAuthority:
certificate:
secretReference:
keyPath: ca.crt
name: dedicated-certs
namespace: dedicated
privateKey:
secretReference:
keyPath: ca.key
name: dedicated-certs
namespace: dedicated
clientCertificate:
certificate:
secretReference:
keyPath: tls.crt
name: dedicated-root-client-certs
namespace: dedicated
privateKey:
secretReference:
keyPath: tls.key
name: dedicated-root-client-certs
namespace: dedicated
status: {}
```
Check the `etcd` cluster:
```bash
kubectl -n dedicated get sts,pods,pvc
NAME READY AGE
statefulset.apps/dedicated 3/3 25h
NAME READY STATUS RESTARTS AGE
pod/dedicated-0 1/1 Running 0 25h
pod/dedicated-1 1/1 Running 0 25h
pod/dedicated-2 1/1 Running 0 25h
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/data-dedicated-0 Bound pvc-a5c66737-ef78-4689-b863-037f8382ed78 10Gi RWO local-path 25h
persistentvolumeclaim/data-dedicated-1 Bound pvc-1e9f77eb-89f3-4256-9508-c18b71fca7ea 10Gi RWO local-path 25h
persistentvolumeclaim/data-dedicated-2 Bound pvc-957c4802-1e7c-4f37-ac01-b89ad1fa9fdb 10Gi RWO local-path 25h
```
## Migrate data
To migrate data from current `default` datastore to the new dedicated one, patch the Tenant Control Plane `tenant-00` to use the new `dedicated` datastore:
```shell
kubectl patch --type merge tcp tenant-00 -p '{"spec": {"dataStore": "dedicated"}}'
```
and check the process happening in real time:
```shell
kubectl get tcp -w
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
tenant-00 v1.25.2 Ready 192.168.32.200:6443 tenant-00-admin-kubeconfig default 9d
tenant-00 v1.25.2 Migrating 192.168.32.200:6443 tenant-00-admin-kubeconfig default 9d
tenant-00 v1.25.2 Migrating 192.168.32.200:6443 tenant-00-admin-kubeconfig default 9d
tenant-00 v1.25.2 Migrating 192.168.32.200:6443 tenant-00-admin-kubeconfig dedicated 9d
tenant-00 v1.25.2 Migrating 192.168.32.200:6443 tenant-00-admin-kubeconfig dedicated 9d
tenant-00 v1.25.2 Ready 192.168.32.200:6443 tenant-00-admin-kubeconfig dedicated 9d
```
During the datastore migration, the Tenant Control Plane is put in read-only mode to avoid misalignments between source and destination datastores. If tenant users try to update the data, an admission controller denies the request with the following message:
```shell
Error from server (the current Control Plane is in freezing mode due to a maintenance mode,
all the changes are blocked: removing the webhook may lead to an inconsistent state upon its completion):
admission webhook "catchall.migrate.kamaji.clastix.io" denied the request
```
After a while, depending on the amount of data to migrate, the Tenant Control Plane is put back in full operating mode by the Kamaji controller.
> Please, note the datastore migration leaves the data on the default datastore, so you have to remove it manually.

View File

@@ -13,7 +13,6 @@ The guide requires:
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -96,21 +95,13 @@ And check you can access:
kubectl cluster-info
```
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a 3-replica StatefulSet with data persistence:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, as MySQL or PostgreSQL compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as datastore:
Kamaji takes advantage of the [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by a TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
The Kamaji controller needs to access a default datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd`, out of the box.
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
```bash
helm repo add clastix https://clastix.github.io/charts
@@ -118,15 +109,7 @@ helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
Alternatively, if you opted for a managed `etcd` datastore:
```
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Azure Kubernetes AKS cluster into a Kamaji cluster capable to run multiple Tenant Control Planes.
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to setup a managed multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a different storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Create Tenant Cluster
@@ -146,6 +129,7 @@ metadata:
name: ${TENANT_NAME}
namespace: ${TENANT_NAMESPACE}
spec:
dataStore: default
controlPlane:
deployment:
replicas: 3
@@ -171,7 +155,7 @@ spec:
requests:
cpu: 125m
memory: 256Mi
limits: {}
limits: {}
service:
additionalMetadata:
labels:
@@ -219,7 +203,7 @@ spec:
protocol: TCP
targetPort: ${TENANT_PORT}
selector:
kamaji.clastix.io/soot: ${TENANT_NAME}
kamaji.clastix.io/name: ${TENANT_NAME}
type: LoadBalancer
EOF

View File

@@ -13,7 +13,6 @@ The guide requires:
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -42,30 +41,26 @@ Throughout the following instructions, shell variables are used to indicate valu
source kamaji.env
```
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide at least:
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenants' `etcd`. Local Persistent Volumes are an option.
- Support for LoadBalancer Service Type, or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Monitoring Stack, eg. [Prometheus](https://github.com/prometheus-community).
- CSI module installed with a Storage Class for the Tenant datastores. Local Persistent Volumes are an option.
- Support for LoadBalancer service type, eg. [MetalLB](https://metallb.universe.tf/), or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Optionally, a Monitoring Stack installed, eg. [Prometheus](https://github.com/prometheus-community).
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster.
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a 3-replica StatefulSet with data persistence:
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster and check you can access:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
kubectl cluster-info
```
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, as MySQL or PostgreSQL compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as datastore:
Kamaji takes advantage of the [dynamic admission control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), such as validating and mutating webhook configurations. These webhooks are secured by a TLS communication, and the certificates are managed by [`cert-manager`](https://cert-manager.io/), making it a prerequisite that must be [installed](https://cert-manager.io/docs/installation/).
The Kamaji controller needs to access a default datastore in order to save data of the tenants' clusters. The Kamaji Helm Chart provides the installation of a basic unmanaged `etcd`, out of the box.
Install Kamaji with `helm` using an unmanaged `etcd` as default datastore:
```bash
helm repo add clastix https://clastix.github.io/charts
@@ -73,15 +68,7 @@ helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
Alternatively, if you opted for a managed `etcd` datastore:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Kubernetes cluster into a Kamaji cluster capable to run multiple Tenant Control Planes.
A managed datastore is highly recommended in production. The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to setup a managed multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a different storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Create Tenant Cluster
@@ -97,6 +84,7 @@ metadata:
name: ${TENANT_NAME}
namespace: ${TENANT_NAMESPACE}
spec:
dataStore: default
controlPlane:
deployment:
replicas: 3
@@ -159,12 +147,13 @@ EOF
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
After a few minutes, check the created resources in the tenants namespace and when ready it will look similar to the following:
After a few seconds, check the created resources in the tenants namespace and when ready it will look similar to the following:
```command
kubectl -n tenants get tcp,deploy,pods,svc
NAME VERSION STATUS CONTROL-PLANE-ENDPOINT KUBECONFIG AGE
tenantcontrolplane.kamaji.clastix.io/tenant-00 v1.23.1 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig 2m20s
NAME VERSION STATUS CONTROL-PLANE ENDPOINT KUBECONFIG DATASTORE AGE
tenantcontrolplane/tenant-00 v1.25.2 Ready 192.168.32.240:6443 tenant-00-admin-kubeconfig default 2m20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/tenant-00 3/3 3 3 118s
@@ -178,9 +167,9 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/TCP,8132:32713/TCP 2m20s
```
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the Admin cluster.
The regular Tenant Control Plane containers: `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` are running unchanged in the `tcp` pods instead of dedicated machines and they are exposed through a service on the port `6443` of worker nodes in the admin cluster.
The `LoadBalancer` service type is used to expose the Tenant Control Plane. However, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case. High Availability and rolling updates of the Tenant Control Plane are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
The `LoadBalancer` service type is used to expose the Tenant Control Plane on the assigned `loadBalancerIP` acting as `ControlPlaneEndpoint` for the worker nodes and other clients as, for example, `kubectl`. Service types `NodePort` and `ClusterIP` are still viable options to expose the Tenant Control Plane, depending on the case. High Availability and rolling updates of the Tenant Control Planes are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
### Working with Tenant Control Plane

View File

@@ -1,20 +1,16 @@
# Manage tenant resources GitOps-way from the admin cluster
In this guide, you can learn how to apply applications and resources in general, the GitOps-way, to the Tenant Control Planes.
This guide describes a declarative way to deploy Kubernetes add-ons across multiple Tenant Clusters, the GitOps-way. An admin may need to apply a specific workload into Tenant Clusters and ensure it is constantly reconciled, no matter what the tenants will do in their clusters. Examples include installing monitoring agents, ensuring specific policies, installing infrastructure operators like Cert Manager and so on.
An admin may need to apply a specific workload into tenant control planes and ensure it is constantly reconciled, no matter what the tenants will do in their clusters.
Examples include installing monitoring agents, ensuring specific policies, installing infrastructure operators like Cert Manager and so on.
This way the tenant resources can be ensured from a single pane of glass, from the *admin cluster*.
## Flux as the GitOps operator
As GitOps ensures a constant reconciliation to a Git-versioned desired state, Flux can satisfy the requirement of those scenarios.
In particular, the controllers that reconcile [resources](https://fluxcd.io/flux/concepts/#reconciliation) support communicating to external clusters.
As GitOps ensures a constant reconciliation to a Git-versioned desired state, [Flux](https://fluxcd.io) can satisfy the requirement of those scenarios. In particular, the controllers that reconcile [resources](https://fluxcd.io/flux/concepts/#reconciliation) support communicating to external clusters.
In this scenario the Flux toolkit would run in the *admin cluster*, with reconcile controllers reconciling resources into *tenant clusters*.
<img src="../images/kamaji-flux.png" alt="kamaji-flux" width="720"/>
![Architecture](../images/kamaji-flux.png)
This is something possible as the Flux reconciliation Custom Resources specifications provide ability to specify `Secret` which contain a `kubeconfig` - here you can find the related documentation for both [`Kustomization`](https://fluxcd.io/flux/components/kustomize/kustomization/#remote-clusters--cluster-api) and [`HelmRelease`](https://fluxcd.io/flux/components/helm/helmreleases/#remote-clusters--cluster-api) CRs.
@@ -86,10 +82,6 @@ tenant1-cert-manager-cainjector 1/1 1 1 4m3s
tenant1-cert-manager-webhook 1/1 1 1 4m3s
```
## Conclusion
This way tenant resources can be ensured from a single pane of glass, from the *admin cluster*.
No matter what the tenant users will do on the *tenant cluster*, the Flux reconciliation controllers running in the *admin cluster* will ensure that the desired state, declared by the reconciliation resources existing in the *admin cluster*, is reconciled in the *tenant cluster*.
Furthermore, this approach requires neither installing Flux in each tenant cluster nor applying the related reconciliation Custom Resources there.

View File

@@ -7,7 +7,26 @@ The process of upgrading a _“tenant cluster”_ consists in two steps:
## Upgrade of Tenant Control Plane
You should patch the `TenantControlPlane.spec.kubernetes.version` custom resource with a new compatible value according to the [Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/).
> Note: during the upgrade, a new ReplicaSet of Tenant Control Plane pod will be created, so make sure you have at least two pods to avoid service disruption.
During the upgrade, a new ReplicaSet of Tenant Control Plane pod will be created, so make sure you have enough replicas to avoid service disruption. Also make sure you have the Rolling Update strategy properly configured:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: tenant-00
spec:
controlPlane:
deployment:
replicas: 3
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
...
```
## Upgrade of Tenant Worker Nodes
As currently Kamaji is not providing any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`. We have in roadmap, the Cluster APIs support so that you can upgrade _“tenant clusters”_ in a fully declarative way.
As currently Kamaji is not providing any helpers for Tenant Worker Nodes, you should make sure to upgrade them manually, for example, with the help of `kubeadm`. Refer to the official [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#upgrade-worker-nodes).
> We have in roadmap, the Cluster APIs support so that you can upgrade _“tenant clusters”_ in a fully declarative way.

View File

@@ -1006,6 +1006,15 @@ Defining the options for the deployed Tenant Control Plane as Deployment resourc
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run the Tenant Control Plane pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class<br/>
</td>
<td>false</td>
</tr><tr>
<td><b><a href="#tenantcontrolplanespeccontrolplanedeploymentstrategy">strategy</a></b></td>
<td>object</td>
<td>
Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane. Default value is set to Rolling Update, with a blue/green strategy.<br/>
<br/>
<i>Default</i>: map[rollingUpdate:map[maxSurge:100% maxUnavailable:0] type:RollingUpdate]<br/>
</td>
<td>false</td>
</tr><tr>
<td><b><a href="#tenantcontrolplanespeccontrolplanedeploymenttolerationsindex">tolerations</a></b></td>
<td>[]object</td>
@@ -2511,6 +2520,72 @@ ComponentResourceRequirements describes the compute resource requirements.
</table>
### TenantControlPlane.spec.controlPlane.deployment.strategy
Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane. Default value is set to Rolling Update, with a blue/green strategy.
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b><a href="#tenantcontrolplanespeccontrolplanedeploymentstrategyrollingupdate">rollingUpdate</a></b></td>
<td>object</td>
<td>
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. --- TODO: Update this to follow our convention for oneOf, whatever we decide it to be.<br/>
</td>
<td>false</td>
</tr><tr>
<td><b>type</b></td>
<td>string</td>
<td>
Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.<br/>
</td>
<td>false</td>
</tr></tbody>
</table>
### TenantControlPlane.spec.controlPlane.deployment.strategy.rollingUpdate
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. --- TODO: Update this to follow our convention for oneOf, whatever we decide it to be.
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b>maxSurge</b></td>
<td>int or string</td>
<td>
The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.<br/>
</td>
<td>false</td>
</tr><tr>
<td><b>maxUnavailable</b></td>
<td>int or string</td>
<td>
The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.<br/>
</td>
<td>false</td>
</tr></tbody>
</table>
### TenantControlPlane.spec.controlPlane.deployment.tolerations[index]
@@ -2862,6 +2937,15 @@ Kubernetes specification for tenant control plane
<i>Enum</i>: systemd, cgroupfs<br/>
</td>
<td>false</td>
</tr><tr>
<td><b>preferredAddressTypes</b></td>
<td>[]enum</td>
<td>
Ordered list of the preferred NodeAddressTypes to use for kubelet connections. Default to Hostname, InternalIP, ExternalIP.<br/>
<br/>
<i>Default</i>: [Hostname InternalIP ExternalIP]<br/>
</td>
<td>false</td>
</tr></tbody>
</table>
@@ -5306,7 +5390,7 @@ KubernetesVersion contains the information regarding the running Kubernetes vers
<td>
Status returns the current status of the Kubernetes version, such as its provisioning state, or completed upgrade.<br/>
<br/>
<i>Enum</i>: Provisioning, Upgrading, Migrating, Ready, NotReady<br/>
<i>Enum</i>: Provisioning, CertificateAuthorityRotating, Upgrading, Migrating, Ready, NotReady<br/>
<i>Default</i>: Provisioning<br/>
</td>
<td>false</td>

View File

@@ -0,0 +1,239 @@
# Benchmark
Kamaji has been designed to operate Kubernetes Tenant Control Plane resources at large scale.
In the Operator jargon, a manager is created to start several controllers, each one with their own responsibility.
When a manager is started, all the underlying controllers are started, along with other "runnable" resources, like the webhook server.
Kamaji operates several reconciliation operations, both in the admin and tenant clusters.
With that said, a main manager is responsible for reconciling the admin resources (Deployment, Secret, ConfigMap, etc.); for each Tenant Control Plane, a new manager will be spun up as a main manager controller.
These Tenant Control Plane managers, named in the code base as soot managers, in turn, start and run controllers to ensure the desired state of the underlying add-ons, and required resources such as kubeadm ones.
With that said, monitoring the Kamaji stack is essential to understand any anomaly in memory consumption, or CPU usage.
The provided Helm Chart is offering a [`ServiceMonitor`](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md) that can be used to extract all the required metrics of the Kamaji operator.
# Running 100 Tenant Control Planes using a single DataStore
- _Cloud platform:_ AWS
- _Amount of reconciled TCPs:_ 100
- _Number of DataStore resources:_ 1
- _DataStore driver_: `etcd`
- _Reconciliation time requested:_ ~7m30s
The following benchmark aims to shed light on the Kamaji resource consumption, such as CPU and memory, required to orchestrate 100 TCPs using a single shared datastore.
Your mileage may vary; we just want to share with the community how the benchmark has been elaborated.
## Infrastructure
The benchmark has been issued on a Kubernetes cluster backed by Elastic Kubernetes Service used as an Admin cluster.
Two node pools have been created to avoid the noisy neighbour effect, and to improve performance:
- `infra`: hosting Kamaji, the monitoring stack, and the `DataStore` resources, made of 2 `t3.medium` instances.
- `workload`: hosting the deployed Tenant Control Plane resources, made of 25 `r3.xlarge` instances.
### Monitoring stack
The following monitoring stack was required to perform the benchmark.
#### Deploy the Prometheus operator
```
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm upgrade --install kube-prometheus bitnami/kube-prometheus --namespace monitoring-system --create-namespace \
--set operator.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "operator.tolerations[0].key=pool" \
--set "operator.tolerations[0].operator=Equal" \
--set "operator.tolerations[0].value=infra" \
--set "operator.tolerations[0].effect=NoExecute" \
--set "operator.tolerations[1].key=pool" \
--set "operator.tolerations[1].operator=Equal" \
--set "operator.tolerations[1].value=infra" \
--set "operator.tolerations[1].effect=NoSchedule" \
--set prometheus.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "prometheus.tolerations[0].key=pool" \
--set "prometheus.tolerations[0].operator=Equal" \
--set "prometheus.tolerations[0].value=infra" \
--set "prometheus.tolerations[0].effect=NoExecute" \
--set "prometheus.tolerations[1].key=pool" \
--set "prometheus.tolerations[1].operator=Equal" \
--set "prometheus.tolerations[1].value=infra" \
--set "prometheus.tolerations[1].effect=NoSchedule" \
--set alertmanager.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "alertmanager.tolerations[0].key=pool" \
--set "alertmanager.tolerations[0].operator=Equal" \
--set "alertmanager.tolerations[0].value=infra" \
--set "alertmanager.tolerations[0].effect=NoExecute" \
--set "alertmanager.tolerations[1].key=pool" \
--set "alertmanager.tolerations[1].operator=Equal" \
--set "alertmanager.tolerations[1].value=infra" \
--set "alertmanager.tolerations[1].effect=NoSchedule" \
--set kube-state-metrics.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "kube-state-metrics.tolerations[0].key=pool" \
--set "kube-state-metrics.tolerations[0].operator=Equal" \
--set "kube-state-metrics.tolerations[0].value=infra" \
--set "kube-state-metrics.tolerations[0].effect=NoExecute" \
--set "kube-state-metrics.tolerations[1].key=pool" \
--set "kube-state-metrics.tolerations[1].operator=Equal" \
--set "kube-state-metrics.tolerations[1].value=infra" \
--set "kube-state-metrics.tolerations[1].effect=NoSchedule" \
--set blackboxExporter.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "blackboxExporter.tolerations[0].key=pool" \
--set "blackboxExporter.tolerations[0].operator=Equal" \
--set "blackboxExporter.tolerations[0].value=infra" \
--set "blackboxExporter.tolerations[0].effect=NoExecute" \
--set "blackboxExporter.tolerations[1].key=pool" \
--set "blackboxExporter.tolerations[1].operator=Equal" \
--set "blackboxExporter.tolerations[1].value=infra" \
--set "blackboxExporter.tolerations[1].effect=NoSchedule"
```
### Deploy a Grafana dashboard
```
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm upgrade --install grafana bitnami/grafana --namespace monitoring-system --create-namespace \
--set grafana.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "grafana.tolerations[0].key=pool" \
--set "grafana.tolerations[0].operator=Equal" \
--set "grafana.tolerations[0].value=infra" \
--set "grafana.tolerations[0].effect=NoSchedule" \
--set "grafana.tolerations[1].key=pool" \
--set "grafana.tolerations[1].operator=Equal" \
--set "grafana.tolerations[1].value=infra" \
--set "grafana.tolerations[1].effect=NoExecute"
```
> The dashboard can be used to have a visual representation of the global cluster resources usage, and getting information about the single Pods resources consumption.
>
> Besides that, Grafana has been used to track the etcd cluster status and performance, although it's not the subject of the benchmark.
### Install cert-manager
```
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm upgrade --install cert-manager bitnami/cert-manager --namespace certmanager-system --create-namespace \
--set cainjector.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "cainjector.tolerations[0].key=pool" \
--set "cainjector.tolerations[0].operator=Equal" \
--set "cainjector.tolerations[0].value=infra" \
--set "cainjector.tolerations[0].effect=NoSchedule" \
--set "cainjector.tolerations[1].key=pool" \
--set "cainjector.tolerations[1].operator=Equal" \
--set "cainjector.tolerations[1].value=infra" \
--set "cainjector.tolerations[1].effect=NoExecute" \
--set controller.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "controller.tolerations[0].key=pool" \
--set "controller.tolerations[0].operator=Equal" \
--set "controller.tolerations[0].value=infra" \
--set "controller.tolerations[0].effect=NoExecute" \
--set "controller.tolerations[1].key=pool" \
--set "controller.tolerations[1].operator=Equal" \
--set "controller.tolerations[1].value=infra" \
--set "controller.tolerations[1].effect=NoSchedule" \
--set webhook.nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "webhook.tolerations[0].key=pool" \
--set "webhook.tolerations[0].operator=Equal" \
--set "webhook.tolerations[0].value=infra" \
--set "webhook.tolerations[0].effect=NoSchedule" \
--set "webhook.tolerations[1].key=pool" \
--set "webhook.tolerations[1].operator=Equal" \
--set "webhook.tolerations[1].value=infra" \
--set "webhook.tolerations[1].effect=NoExecute" \
--set "installCRDs=true"
```
### Install Kamaji
```
helm upgrade --install kamaji clastix/kamaji --namespace kamaji-system --create-namespace \
--set nodeSelector."eks\.amazonaws\.com/nodegroup"=infra \
--set "tolerations[0].key=pool" \
--set "tolerations[0].operator=Equal" \
--set "tolerations[0].value=infra" \
--set "tolerations[0].effect=NoExecute" \
--set "tolerations[1].key=pool" \
--set "tolerations[1].operator=Equal" \
--set "tolerations[1].value=infra" \
--set "tolerations[1].effect=NoSchedule" \
--set "resources=null"
```
> For the benchmark, Kamaji is running without any resource constraint to benefit from all the available resources.
### Install etcd as a DataStore
```
helm upgrade --install etcd-01 clastix/kamaji-etcd --namespace kamaji-etcd --create-namespace --set "serviceMonitor.enabled=true" --set "datastore.enabled=true"
```
## Creating 100 Tenant Control Planes
Once all the required components have been deployed, a simple bash for loop can be used to deploy the TCP resources.
```
kubectl create ns benchmark01
for I in {001..100}; do
DS=etcd-01 NS=benchmark01 I=$I envsubst < kamaji_v1alpha1_tenantcontrolplane.yaml | kubectl apply -f -
done
```
Content of the benchmark file:
```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
name: benchmark$I
namespace: $NS
spec:
dataStore: $DS
controlPlane:
deployment:
replicas: 2
resources:
apiServer:
requests:
memory: 320Mi
cpu: 100m
service:
serviceType: ClusterIP
kubernetes:
version: "v1.25.4"
kubelet:
cgroupfs: cgroupfs
admissionControllers:
- ResourceQuota
- LimitRanger
networkProfile:
port: 6443
addons:
coreDNS: {}
kubeProxy: {}
```
> For the benchmark we're not creating a LoadBalancer service, or an Ingress.
> The goal of the benchmark is to monitor resource consumption, and the average time required to create the requested resources.
## Conclusion
Our latest benchmark showed the ability to fully reconcile 100 Tenant Control Plane resources in ~7m30s.
All the Tenant Control Planes were in `Ready` state and able to handle any request.
The CPU consumption of Kamaji was fluctuating between 100 and 1200 mCPU during the peaks due to certificate generations.
The memory consumption of Kamaji hit ~600 MiB, although this data is not entirely representative since we didn't put any memory limit.
In conclusion, Kamaji was able to reconcile a Tenant Control Plane every 4.5 seconds, requiring 12 mCPU, and 6 MiB of memory.
The following values may vary according to the nodes, resource limits, and other constraints.
If you're encountering different results, please, engage with the community to share them.
# Running a thousand Tenant Control Planes using multiple DataStores
The next benchmark must address the use case where a Kamaji admin cluster manages up to a thousand Tenant Control Plane instances.

View File

@@ -4,17 +4,22 @@ Currently, **Kamaji** allows customization using CLI flags for the `manager` sub
Available flags are the following:
```
--datastore string The default DataStore that should be used by Kamaji to setup the required storage (default "etcd")
--health-probe-bind-address string The address the probe endpoint binds to. (default ":8081")
-h, --help help for manager
--kine-image string Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies) (default "rancher/kine:v0.9.2-amd64")
--leader-elect Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. (default true)
--metrics-bind-address string The address the metric endpoint binds to. (default ":8080")
--tmp-directory string Directory which will be used to work with temporary files. (default "/tmp/kamaji")
--zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
--zap-encoder encoder Zap log encoding (one of 'json' or 'console')
--zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
--zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
--zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
```
| Flag | Usage | Default |
| ---- | ------ | --- |
| `--metrics-bind-address` | The address the metric endpoint binds to. | `:8080` |
| `--health-probe-bind-address` | The address the probe endpoint binds to. | `:8081` |
| `--leader-elect` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. | `true` |
| `--tmp-directory` | Directory which will be used to work with temporary files. | `/tmp/kamaji` |
| `--kine-image` | Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies). | `rancher/kine:v0.9.2-amd64` |
| `--datastore` | The default DataStore that should be used by Kamaji to setup the required storage. | `etcd` |
| `--migrate-image` | Specify the container image to launch when a TenantControlPlane is migrated to a new datastore. | `migrate-image` |
| `--max-concurrent-tcp-reconciles` | Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption). | `1` |
| `--pod-namespace` | The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs. | `os.Getenv("POD_NAMESPACE")` |
| `--webhook-service-name` | The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs. | `kamaji-webhook-service` |
| `--serviceaccount-name` | The Kubernetes ServiceAccount used by the Operator, required for the TenantControlPlane migration jobs. | `os.Getenv("SERVICE_ACCOUNT")` |
| `--webhook-ca-path` | Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs. | `/tmp/k8s-webhook-server/serving-certs/ca.crt` |
| `--zap-devel` | Development Mode (encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode (encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error). | `true` |
| `--zap-encoder` | Zap log encoding, one of 'json' or 'console' | `console` |
| `--zap-log-level` | Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity | `info` |
| `--zap-stacktrace-level` | Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). | `info` |
| `--zap-time-encoding` | Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') | `epoch` |

View File

@@ -2,9 +2,9 @@
In Kamaji, there are different components that might require independent versioning and support level:
|Kamaji|Admin Cluster|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0+ |1.21.0 |1.23.5 |0.0.31 |3.5.4 |
|0.0.2 |1.22.0+ |1.21.0 |1.25.0 |0.0.32 |3.5.4 |
|Kamaji|Admin Cluster (min)|Admin Cluster (max)|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------------|-------------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0 |1.24.0 |1.21.0 |1.23.5 |0.0.31 |3.5.4 |
|0.1.0 |1.22.0 |1.25.0 |1.21.0 |1.25.0 |0.0.32 |3.5.4 |
|0.2.0 |1.22.0 |1.26.0 |1.21.0 |1.26.0 |0.0.32 |3.5.6 |
Other combinations might work but they have not been yet tested.

View File

@@ -46,10 +46,13 @@ nav:
- guides/kamaji-azure-deployment-guide.md
- guides/postgresql-datastore.md
- guides/mysql-datastore.md
- guides/kamaji-gitops-flux.md
- guides/upgrade.md
- guides/datastore-migration.md
- 'Use Cases': use-cases.md
- 'Reference':
- reference/index.md
- reference/benchmark.md
- reference/configuration.md
- reference/conformance.md
- reference/versioning.md
@@ -58,4 +61,3 @@ nav:
- contribute/index.md
- contribute/guidelines.md
- contribute/governance.md

View File

@@ -62,6 +62,8 @@ var _ = Describe("When migrating a Tenant Control Plane to another datastore", f
})
// Check if TenantControlPlane resource has been created
It("Should contain all the migrated data", func() {
time.Sleep(10 * time.Second)
By("getting TCP rest.Config")
config, err := utilities.GetTenantKubeconfig(context.Background(), k8sClient, tcp)
Expect(err).ToNot(HaveOccurred())

View File

@@ -0,0 +1,89 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
var _ = Describe("Deploy a TenantControlPlane with wrong preferred kubelet address type entries", func() {
It("should fail when using duplicates", func() {
Consistently(func() error {
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "duplicated-kubelet-preferred-address-type",
Namespace: "default",
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
DataStore: "default",
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},
},
Kubernetes: kamajiv1alpha1.KubernetesSpec{
Version: "v1.23.6",
Kubelet: kamajiv1alpha1.KubeletSpec{
PreferredAddressTypes: []kamajiv1alpha1.KubeletPreferredAddressType{
kamajiv1alpha1.NodeHostName,
kamajiv1alpha1.NodeInternalIP,
kamajiv1alpha1.NodeExternalIP,
kamajiv1alpha1.NodeHostName,
},
CGroupFS: "cgroupfs",
},
},
},
}
return k8sClient.Create(context.Background(), tcp)
}, 10*time.Second, time.Second).ShouldNot(Succeed())
})
It("should fail when using non valid entries", func() {
Consistently(func() error {
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "duplicated-kubelet-preferred-address-type",
Namespace: "default",
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
DataStore: "default",
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: 1,
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},
},
Kubernetes: kamajiv1alpha1.KubernetesSpec{
Version: "v1.23.6",
Kubelet: kamajiv1alpha1.KubeletSpec{
PreferredAddressTypes: []kamajiv1alpha1.KubeletPreferredAddressType{
kamajiv1alpha1.NodeHostName,
kamajiv1alpha1.NodeInternalIP,
kamajiv1alpha1.NodeExternalIP,
"Foo",
},
CGroupFS: "cgroupfs",
},
},
},
}
return k8sClient.Create(context.Background(), tcp)
}, 10*time.Second, time.Second).ShouldNot(Succeed())
})
})

71
go.mod
View File

@@ -10,6 +10,7 @@ require (
github.com/go-sql-driver/mysql v1.6.0
github.com/google/uuid v1.3.0
github.com/json-iterator/go v1.1.12
github.com/juju/mutex/v2 v2.0.0
github.com/onsi/ginkgo/v2 v2.6.0
github.com/onsi/gomega v1.24.1
github.com/pkg/errors v0.9.1
@@ -19,14 +20,15 @@ require (
github.com/testcontainers/testcontainers-go v0.13.0
go.etcd.io/etcd/api/v3 v3.5.6
go.etcd.io/etcd/client/v3 v3.5.6
k8s.io/api v0.26.0
k8s.io/apimachinery v0.26.0
k8s.io/apiserver v0.26.0
k8s.io/client-go v0.26.0
go.uber.org/automaxprocs v1.5.1
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/apiserver v0.26.1
k8s.io/client-go v0.26.1
k8s.io/cluster-bootstrap v0.0.0
k8s.io/klog/v2 v2.80.1
k8s.io/kubelet v0.0.0
k8s.io/kubernetes v1.26.0
k8s.io/kubernetes v1.26.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.0
)
@@ -84,6 +86,7 @@ require (
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lithammer/dedent v1.1.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
@@ -144,9 +147,9 @@ require (
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.0 // indirect
k8s.io/cli-runtime v0.26.0 // indirect
k8s.io/component-base v0.26.0 // indirect
k8s.io/apiextensions-apiserver v0.26.1 // indirect
k8s.io/cli-runtime v0.26.1 // indirect
k8s.io/component-base v0.26.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/kube-proxy v0.0.0 // indirect
k8s.io/system-validators v1.8.0 // indirect
@@ -159,32 +162,32 @@ require (
)
replace (
k8s.io/api => k8s.io/api v0.26.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.0
k8s.io/apimachinery => k8s.io/apimachinery v0.26.0
k8s.io/apiserver => k8s.io/apiserver v0.26.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.0
k8s.io/client-go => k8s.io/client-go v0.26.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.0
k8s.io/code-generator => k8s.io/code-generator v0.26.0
k8s.io/component-base => k8s.io/component-base v0.26.0
k8s.io/component-helpers => k8s.io/component-helpers v0.26.0
k8s.io/controller-manager => k8s.io/controller-manager v0.26.0
k8s.io/cri-api => k8s.io/cri-api v0.26.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.0
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.0
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.0
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.0
k8s.io/kubectl => k8s.io/kubectl v0.26.0
k8s.io/kubelet => k8s.io/kubelet v0.26.0
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.0
k8s.io/metrics => k8s.io/metrics v0.26.0
k8s.io/mount-utils => k8s.io/mount-utils v0.26.0
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.0
k8s.io/api => k8s.io/api v0.26.1
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1
k8s.io/apimachinery => k8s.io/apimachinery v0.26.1
k8s.io/apiserver => k8s.io/apiserver v0.26.1
k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1
k8s.io/client-go => k8s.io/client-go v0.26.1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1
k8s.io/code-generator => k8s.io/code-generator v0.26.1
k8s.io/component-base => k8s.io/component-base v0.26.1
k8s.io/component-helpers => k8s.io/component-helpers v0.26.1
k8s.io/controller-manager => k8s.io/controller-manager v0.26.1
k8s.io/cri-api => k8s.io/cri-api v0.26.1
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.1
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1
k8s.io/kubectl => k8s.io/kubectl v0.26.1
k8s.io/kubelet => k8s.io/kubelet v0.26.1
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1
k8s.io/metrics => k8s.io/metrics v0.26.1
k8s.io/mount-utils => k8s.io/mount-utils v0.26.1
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1
)
replace github.com/JamesStewy/go-mysqldump => github.com/vtoma/go-mysqldump v1.0.0

73
go.sum
View File

@@ -423,6 +423,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -447,7 +448,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -576,6 +577,18 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=
github.com/juju/collections v0.0.0-20200605021417-0d0ec82b7271 h1:4R626WTwa7pRYQFiIRLVPepMhm05eZMEx+wIurRnMLc=
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 h1:EJHbsNpQyupmMeWTq7inn+5L/WZ7JfzCVPJ+DP9McCQ=
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9/go.mod h1:TRm7EVGA3mQOqSVcBySRY7a9Y1/gyVhh/WTCnc5sD4U=
github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4 h1:NO5tuyw++EGLnz56Q8KMyDZRwJwWO8jQnj285J3FOmY=
github.com/juju/mgo/v2 v2.0.0-20210302023703-70d5d206e208 h1:/WiCm+Vpj87e4QWuWwPD/bNE9kDrWCLvPBHOQNcG2+A=
github.com/juju/mutex/v2 v2.0.0 h1:rVmJdOaXGWF8rjcFHBNd4x57/1tks5CgXHx55O55SB0=
github.com/juju/mutex/v2 v2.0.0/go.mod h1:jwCfBs/smYDaeZLqeaCi8CB8M+tOes4yf827HoOEoqk=
github.com/juju/retry v0.0.0-20180821225755-9058e192b216 h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU=
github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 h1:XEDzpuZb8Ma7vLja3+5hzUqVTvAqm5Y+ygvnDs5iTMM=
github.com/juju/utils/v3 v3.0.0-20220130232349-cd7ecef0e94a h1:5ZWDCeCF0RaITrZGemzmDFIhjR/MVSvBUqgSyaeTMbE=
github.com/juju/version/v2 v2.0.0-20211007103408-2e8da085dc23 h1:wtEPbidt1VyHlb8RSztU6ySQj29FLsOQiI9XiJhXDM4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -591,6 +604,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -665,7 +679,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -747,6 +760,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -963,10 +977,11 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0H
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
@@ -1509,8 +1524,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
@@ -1555,39 +1570,39 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
k8s.io/apiextensions-apiserver v0.26.0 h1:Gy93Xo1eg2ZIkNX/8vy5xviVSxwQulsnUdQ00nEdpDo=
k8s.io/apiextensions-apiserver v0.26.0/go.mod h1:7ez0LTiyW5nq3vADtK6C3kMESxadD51Bh6uz3JOlqWQ=
k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.26.0 h1:q+LqIK5EZwdznGZb8bq0+a+vCqdeEEe4Ux3zsOjbc4o=
k8s.io/apiserver v0.26.0/go.mod h1:aWhlLD+mU+xRo+zhkvP/gFNbShI4wBDHS33o0+JGI84=
k8s.io/cli-runtime v0.26.0 h1:aQHa1SyUhpqxAw1fY21x2z2OS5RLtMJOCj7tN4oq8mw=
k8s.io/cli-runtime v0.26.0/go.mod h1:o+4KmwHzO/UK0wepE1qpRk6l3o60/txUZ1fEXWGIKTY=
k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg=
k8s.io/cluster-bootstrap v0.26.0 h1:jd6T3WmpZo6TpmIHqg1wc4bX/BLsGC8Tzle/VKI9vRo=
k8s.io/cluster-bootstrap v0.26.0/go.mod h1:daR7iryq3QgPGqyuhlwdQ3jBkvrl2SBGjFYrcL6fZ7s=
k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs=
k8s.io/component-base v0.26.0/go.mod h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8=
k8s.io/cri-api v0.26.0/go.mod h1:I5TGOn/ziMzqIcUvsYZzVE8xDAB1JBkvcwvR0yDreuw=
k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI=
k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM=
k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc=
k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg=
k8s.io/cli-runtime v0.26.1 h1:f9+bRQ1V3elQsx37KmZy5fRAh56mVLbE9A7EMdlqVdI=
k8s.io/cli-runtime v0.26.1/go.mod h1:+e5Ym/ARySKscUhZ8K3hZ+ZBo/wYPIcg+7b5sFYi6Gg=
k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE=
k8s.io/cluster-bootstrap v0.26.1 h1:d36JXyk2/TBKqrUSXoCN6FyTTR3a7UOFVmQbm2YOGTA=
k8s.io/cluster-bootstrap v0.26.1/go.mod h1:Tf5X/siioEyBJjvQUzamT6w8KOnfT8QoIEoWyl2jb9k=
k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4=
k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU=
k8s.io/cri-api v0.26.1/go.mod h1:I5TGOn/ziMzqIcUvsYZzVE8xDAB1JBkvcwvR0yDreuw=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.26.0/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc=
k8s.io/kms v0.26.1/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/kube-proxy v0.26.0 h1:VBC83bWr5L4GKSxRFz0YBbwGgQITc0+p8avGzw0LNKo=
k8s.io/kube-proxy v0.26.0/go.mod h1:4kz3dPdMUnspJnFgoJG9lWn1UCiho85Gyn1WLInK0XA=
k8s.io/kubelet v0.26.0 h1:08bDb5IoUH/1K1t2NUwnGIIWxjm9LSqn6k3FWw1tJGI=
k8s.io/kubelet v0.26.0/go.mod h1:DluF+d8jS2nE/Hs7CC3QM+OZlIEb22NTOihQ3EDwCQ4=
k8s.io/kube-proxy v0.26.1 h1:uYt22aiLhIYKxMfmP0mxOMZn0co9UXwlA2uV0uJTDt4=
k8s.io/kube-proxy v0.26.1/go.mod h1:z7TSAvTeD8xmEzNGgwoiXZ0BCE13IPKXp/tSoBBNzaM=
k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas=
k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/kubernetes v1.26.0 h1:fL8VMr4xlfTazPORLhz5fsvO5I3bsFpmynVxZTH1ItQ=
k8s.io/kubernetes v1.26.0/go.mod h1:z0aCJwn6DxzB/dDiWLbQaJO5jWOR2qoaCMnmSAx45XM=
k8s.io/kubernetes v1.26.1 h1:N+qxlptxpSU/VSLvqBGWyyw/kNhJRpEn1b5YP57+5rk=
k8s.io/kubernetes v1.26.1/go.mod h1:dEfAfGVZBOr2uZLeVazLPj/8E+t8jYFbQqCiBudkB8o=
k8s.io/system-validators v1.8.0 h1:tq05tdO9zdJZnNF3SXrq6LE7Knc/KfJm5wk68467JDg=
k8s.io/system-validators v1.8.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -1601,7 +1616,7 @@ mellium.im/sasl v0.3.0/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
sigs.k8s.io/controller-runtime v0.14.0 h1:ju2xsov5Ara6FoQuddg+az+rAxsUsTYn2IYyEKCTyDc=
sigs.k8s.io/controller-runtime v0.14.0/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=

View File

@@ -59,17 +59,40 @@ func (d *Deployment) SetContainers(podSpec *corev1.PodSpec, tcp *kamajiv1alpha1.
d.buildKine(podSpec, tcp)
}
func (d *Deployment) SetStrategy(deployment *appsv1.DeploymentSpec) {
maxSurge := intstr.FromString("100%")
maxUnavailable := intstr.FromInt(0)
func (d *Deployment) SetStrategy(deployment *appsv1.DeploymentSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
deployment.Strategy = appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &appsv1.RollingUpdateDeployment{
Type: tcp.Spec.ControlPlane.Deployment.Strategy.Type,
}
// If it's recreate strategy, we don't need any RollingUpdate params
if tcp.Spec.ControlPlane.Deployment.Strategy.Type == appsv1.RecreateDeploymentStrategyType {
tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate = nil
return
}
// In case of no RollingUpdate options, Kamaji will perform a blue/green rollout:
// this will ensure to avoid round-robin between old and new version,
// useful especially during the Kubernetes version upgrade phase.
if tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate == nil {
maxSurge := intstr.FromString("100%")
maxUnavailable := intstr.FromInt(0)
deployment.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
MaxUnavailable: &maxUnavailable,
MaxSurge: &maxSurge,
},
}
return
}
deployment.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{}
if tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate.MaxUnavailable != nil {
deployment.Strategy.RollingUpdate.MaxUnavailable = tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate.MaxUnavailable
}
if tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate.MaxSurge != nil {
deployment.Strategy.RollingUpdate.MaxSurge = tcp.Spec.ControlPlane.Deployment.Strategy.RollingUpdate.MaxSurge
}
}
@@ -265,7 +288,7 @@ func (d *Deployment) BuildScheduler(podSpec *corev1.PodSpec, tenantControlPlane
args["--leader-elect"] = "true" //nolint:goconst
podSpec.Containers[schedulerIndex].Name = "kube-scheduler"
podSpec.Containers[schedulerIndex].Image = fmt.Sprintf("k8s.gcr.io/kube-scheduler:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[schedulerIndex].Image = fmt.Sprintf("registry.k8s.io/kube-scheduler:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[schedulerIndex].Command = []string{"kube-scheduler"}
podSpec.Containers[schedulerIndex].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[schedulerIndex].VolumeMounts = []corev1.VolumeMount{
@@ -350,7 +373,7 @@ func (d *Deployment) buildControllerManager(podSpec *corev1.PodSpec, tenantContr
args["--use-service-account-credentials"] = "true"
podSpec.Containers[controllerManagerIndex].Name = "kube-controller-manager"
podSpec.Containers[controllerManagerIndex].Image = fmt.Sprintf("k8s.gcr.io/kube-controller-manager:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[controllerManagerIndex].Image = fmt.Sprintf("registry.k8s.io/kube-controller-manager:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[controllerManagerIndex].Command = []string{"kube-controller-manager"}
podSpec.Containers[controllerManagerIndex].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[controllerManagerIndex].VolumeMounts = []corev1.VolumeMount{
@@ -438,7 +461,7 @@ func (d *Deployment) buildKubeAPIServer(podSpec *corev1.PodSpec, tenantControlPl
podSpec.Containers[apiServerIndex].Name = "kube-apiserver"
podSpec.Containers[apiServerIndex].Args = utilities.ArgsFromMapToSlice(args)
podSpec.Containers[apiServerIndex].Image = fmt.Sprintf("k8s.gcr.io/kube-apiserver:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[apiServerIndex].Image = fmt.Sprintf("registry.k8s.io/kube-apiserver:%s", tenantControlPlane.Spec.Kubernetes.Version)
podSpec.Containers[apiServerIndex].Command = []string{"kube-apiserver"}
podSpec.Containers[apiServerIndex].LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
@@ -532,6 +555,12 @@ func (d *Deployment) buildKubeAPIServerCommand(tenantControlPlane *kamajiv1alpha
extraArgs = utilities.ArgsFromSliceToMap(tenantControlPlane.Spec.ControlPlane.Deployment.ExtraArgs.APIServer)
}
kubeletPreferredAddressTypes := make([]string, 0, len(tenantControlPlane.Spec.Kubernetes.Kubelet.PreferredAddressTypes))
for _, addressType := range tenantControlPlane.Spec.Kubernetes.Kubelet.PreferredAddressTypes {
kubeletPreferredAddressTypes = append(kubeletPreferredAddressTypes, string(addressType))
}
desiredArgs := map[string]string{
"--allow-privileged": "true",
"--authorization-mode": "Node,RBAC",
@@ -542,7 +571,7 @@ func (d *Deployment) buildKubeAPIServerCommand(tenantControlPlane *kamajiv1alpha
"--service-cluster-ip-range": tenantControlPlane.Spec.NetworkProfile.ServiceCIDR,
"--kubelet-client-certificate": path.Join(v1beta3.DefaultCertificatesDir, constants.APIServerKubeletClientCertName),
"--kubelet-client-key": path.Join(v1beta3.DefaultCertificatesDir, constants.APIServerKubeletClientKeyName),
"--kubelet-preferred-address-types": "Hostname,InternalIP,ExternalIP",
"--kubelet-preferred-address-types": strings.Join(kubeletPreferredAddressTypes, ","),
"--proxy-client-cert-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyClientCertName),
"--proxy-client-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyClientKeyName),
"--requestheader-allowed-names": constants.FrontProxyClientCertCommonName,
@@ -756,7 +785,7 @@ func (d *Deployment) buildKine(podSpec *corev1.PodSpec, tcp *kamajiv1alpha1.Tena
// SetSelector configures the Deployment selector to match the Tenant Control
// Plane pods via the kamaji.clastix.io/name label.
//
// NOTE(review): the deprecated kamaji.clastix.io/soot entry has been dropped;
// keeping both keys would require pods to carry both labels and the selector
// would match nothing after the label rename.
func (d *Deployment) SetSelector(deploymentSpec *appsv1.DeploymentSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
	deploymentSpec.Selector = &metav1.LabelSelector{
		MatchLabels: map[string]string{
			"kamaji.clastix.io/name": tcp.GetName(),
		},
	}
}

View File

@@ -6,4 +6,7 @@ package constants
const (
	// ProjectNameLabelKey is the label key marking a resource as managed by the project.
	ProjectNameLabelKey = "kamaji.clastix.io/project"
	// ProjectNameLabelValue is the value paired with ProjectNameLabelKey.
	ProjectNameLabelValue = "kamaji"
	// ControlPlaneLabelKey carries the name of the TenantControlPlane a resource belongs to.
	ControlPlaneLabelKey = "kamaji.clastix.io/name"
	// ControlPlaneLabelResource carries the component name a resource implements.
	ControlPlaneLabelResource = "kamaji.clastix.io/component"
)

View File

@@ -135,6 +135,32 @@ func IsValidCertificateKeyPairBytes(certificateBytes []byte, privateKeyBytes []b
}
}
// VerifyCertificate checks whether the given PEM certificate chains up to the
// provided Certificate Authority for at least one of the required extended key
// usages. It returns true when a valid verification chain is found, along with
// any error reported by the parsing or verification steps.
func VerifyCertificate(cert, ca []byte, usages ...x509.ExtKeyUsage) (bool, error) {
	// At least one key usage is mandatory, otherwise verification is meaningless.
	if len(usages) == 0 {
		return false, fmt.Errorf("missing usages for certificate verification")
	}

	leaf, parseErr := ParseCertificateBytes(cert)
	if parseErr != nil {
		return false, parseErr
	}

	authority, parseErr := ParseCertificateBytes(ca)
	if parseErr != nil {
		return false, parseErr
	}

	pool := x509.NewCertPool()
	pool.AddCert(authority)

	opts := x509.VerifyOptions{
		Roots:     pool,
		KeyUsages: usages,
	}

	chains, verifyErr := leaf.Verify(opts)

	return len(chains) > 0, verifyErr
}
func generateCertificateKeyPairBytes(template *x509.Certificate, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*bytes.Buffer, *bytes.Buffer, error) {
certPrivKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {

View File

@@ -3,6 +3,12 @@
package errors
// MigrationInProcessError signals that the datastore migration of the current
// TenantControlPlane has not completed yet, so reconciliation must be paused.
type MigrationInProcessError struct{}
// Error implements the error interface.
func (n MigrationInProcessError) Error() string {
	return "cannot continue reconciliation, the current TenantControlPlane is still in migration status"
}
type NonExposedLoadBalancerError struct{}
func (n NonExposedLoadBalancerError) Error() string {

View File

@@ -6,5 +6,14 @@ package errors
import "github.com/pkg/errors"
// ShouldReconcileErrorBeIgnored reports whether the given reconciliation error
// represents an expected transient condition (non-exposed LoadBalancer,
// missing valid IP, or an in-flight datastore migration) that must not be
// surfaced as a failure.
//
// The stale one-line return that preceded the switch has been removed: it made
// the switch unreachable, so MigrationInProcessError was never ignored.
func ShouldReconcileErrorBeIgnored(err error) bool {
	switch {
	case errors.As(err, &NonExposedLoadBalancerError{}):
		return true
	case errors.As(err, &MissingValidIPError{}):
		return true
	case errors.As(err, &MigrationInProcessError{}):
		return true
	default:
		return false
	}
}

View File

@@ -360,7 +360,7 @@ func (k *KubeProxy) decodeManifests(ctx context.Context, tcp *kamajiv1alpha1.Ten
if len(tcp.Spec.Addons.KubeProxy.ImageRepository) > 0 {
config.Parameters.KubeProxyOptions.Repository = tcp.Spec.Addons.KubeProxy.ImageRepository
} else {
config.Parameters.KubeProxyOptions.Repository = "k8s.gcr.io"
config.Parameters.KubeProxyOptions.Repository = "registry.k8s.io"
}
if len(tcp.Spec.Addons.KubeProxy.ImageTag) > 0 {

View File

@@ -5,6 +5,7 @@ package resources
import (
"context"
"crypto/x509"
"fmt"
corev1 "k8s.io/api/core/v1"
@@ -17,7 +18,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -30,7 +30,7 @@ type APIServerCertificate struct {
}
func (r *APIServerCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServer.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Certificates.APIServer.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *APIServerCertificate) ShouldCleanup(_ *kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,7 +75,7 @@ func (r *APIServerCertificate) GetName() string {
func (r *APIServerCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServer.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServer.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServer.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.APIServer.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -83,16 +83,31 @@ func (r *APIServerCertificate) UpdateTenantControlPlaneStatus(_ context.Context,
func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
// Retrieving the TenantControlPlane CA:
// this is required to trigger a new generation in case of Certificate Authority rotation.
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err := r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
return err
}
if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isCAValid, err := crypto.VerifyCertificate(r.resource.Data[kubeadmconstants.APIServerCertName], secretCA.Data[kubeadmconstants.CACertName], x509.ExtKeyUsageServerAuth)
if err != nil {
logger.Info(fmt.Sprintf("certificate-authority verify failed: %s", err.Error()))
}
isCertValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.APIServerCertName],
r.resource.Data[kubeadmconstants.APIServerKeyName],
)
if err != nil {
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerCertAndKeyBaseName, err.Error()))
}
if isValid {
if isCAValid && isCertValid {
return nil
}
}
@@ -104,14 +119,6 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
return err
}
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
ca := kubeadm.CertificatePrivateKeyPair{
Name: kubeadmconstants.CACertAndKeyBaseName,
Certificate: secretCA.Data[kubeadmconstants.CACertName],
@@ -129,20 +136,9 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
kubeadmconstants.APIServerKeyName: certificateKeyPair.PrivateKey,
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -5,6 +5,7 @@ package resources
import (
"context"
"crypto/x509"
"fmt"
corev1 "k8s.io/api/core/v1"
@@ -17,7 +18,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -30,7 +30,7 @@ type APIServerKubeletClientCertificate struct {
}
func (r *APIServerKubeletClientCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *APIServerKubeletClientCertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,7 +75,7 @@ func (r *APIServerKubeletClientCertificate) GetName() string {
func (r *APIServerKubeletClientCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServerKubeletClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -83,8 +83,22 @@ func (r *APIServerKubeletClientCertificate) UpdateTenantControlPlaneStatus(_ con
func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
// Retrieving the TenantControlPlane CA:
// this is required to trigger a new generation in case of Certificate Authority rotation.
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err := r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
if checksum := tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isCAValid, err := crypto.VerifyCertificate(r.resource.Data[kubeadmconstants.APIServerKubeletClientCertName], secretCA.Data[kubeadmconstants.CACertName], x509.ExtKeyUsageClientAuth)
if err != nil {
logger.Info(fmt.Sprintf("certificate-authority verify failed: %s", err.Error()))
}
if checksum := tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.APIServerKubeletClientCertName],
r.resource.Data[kubeadmconstants.APIServerKubeletClientKeyName],
@@ -92,7 +106,8 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
if err != nil {
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, err.Error()))
}
if isValid {
if isValid && isCAValid {
return nil
}
}
@@ -104,15 +119,6 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
return err
}
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
ca := kubeadm.CertificatePrivateKeyPair{
Name: kubeadmconstants.CACertAndKeyBaseName,
Certificate: secretCA.Data[kubeadmconstants.CACertName],
@@ -130,20 +136,9 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
kubeadmconstants.APIServerKubeletClientKeyName: certificateKeyPair.PrivateKey,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -16,7 +16,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -24,13 +23,15 @@ import (
type CACertificate struct {
resource *corev1.Secret
isRotatingCA bool
Client client.Client
TmpDirectory string
}
func (r *CACertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.CA.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Certificates.CA.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return r.isRotatingCA || tenantControlPlane.Status.Certificates.CA.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Certificates.CA.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *CACertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,7 +76,10 @@ func (r *CACertificate) GetName() string {
func (r *CACertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.CA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.CA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.CA.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.CA.Checksum = utilities.GetObjectChecksum(r.resource)
if r.isRotatingCA {
tenantControlPlane.Status.Kubernetes.Version.Status = &kamajiv1alpha1.VersionCARotating
}
return nil
}
@@ -84,7 +88,7 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.CACertName],
r.resource.Data[kubeadmconstants.CAKeyName],
@@ -97,6 +101,10 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
}
}
if tenantControlPlane.Status.Kubernetes.Version.Status != nil && *tenantControlPlane.Status.Kubernetes.Version.Status != kamajiv1alpha1.VersionProvisioning {
r.isRotatingCA = true
}
config, err := getStoredKubeadmConfiguration(ctx, r.Client, r.TmpDirectory, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
@@ -116,20 +124,9 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
kubeadmconstants.CAKeyName: ca.PrivateKey,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -16,7 +16,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -29,7 +28,7 @@ type Certificate struct {
}
func (r *Certificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Storage.Certificate.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Storage.Certificate.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *Certificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -70,7 +69,7 @@ func (r *Certificate) GetName() string {
func (r *Certificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Storage.Certificate.SecretName = r.resource.GetName()
tenantControlPlane.Status.Storage.Certificate.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Storage.Certificate.Checksum = utilities.GetObjectChecksum(r.resource)
tenantControlPlane.Status.Storage.Certificate.LastUpdate = metav1.Now()
return nil
@@ -89,7 +88,7 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
r.resource.Data["ca.crt"] = ca
if r.resource.GetAnnotations()[constants.Checksum] == utilities.CalculateMapChecksum(r.resource.Data) {
if utilities.GetObjectChecksum(r.resource) == utilities.CalculateMapChecksum(r.resource.Data) {
if r.DataStore.Spec.Driver == kamajiv1alpha1.EtcdDriver {
if isValid, _ := crypto.IsValidCertificateKeyPairBytes(r.resource.Data["server.crt"], r.resource.Data["server.key"]); isValid {
return nil
@@ -140,20 +139,11 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
r.resource.Data["server.crt"] = crt.Bytes()
r.resource.Data["server.key"] = key.Bytes()
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()),
r.resource.GetLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())

View File

@@ -14,9 +14,10 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
kamajierrors "github.com/clastix/kamaji/internal/errors"
"github.com/clastix/kamaji/internal/resources"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -70,8 +71,8 @@ func (d *Migrate) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1
return nil
}
func (d *Migrate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
return d.ShouldCleanUp
func (d *Migrate) ShouldCleanup(tcp *kamajiv1alpha1.TenantControlPlane) bool {
return d.ShouldCleanUp && *tcp.Status.Kubernetes.Version.Status == kamajiv1alpha1.VersionMigrating
}
func (d *Migrate) CleanUp(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (bool, error) {
@@ -125,26 +126,22 @@ func (d *Migrate) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamaji
}
switch res {
case controllerutil.OperationResultNone:
if len(d.job.Status.Conditions) == 0 {
break
}
case controllerutil.OperationResultCreated, controllerutil.OperationResultUpdated:
d.inProgress = true
condition := d.job.Status.Conditions[0]
if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue {
return resources.OperationResultEnqueueBack, nil
case controllerutil.OperationResultNone:
if len(d.job.Status.Conditions) > 0 && d.job.Status.Conditions[0].Type == batchv1.JobComplete && d.job.Status.Conditions[0].Status == corev1.ConditionTrue {
return controllerutil.OperationResultNone, nil
}
log.FromContext(ctx).Info("migration job not yet completed", "reason", condition.Reason, "message", condition.Message)
case controllerutil.OperationResultCreated:
break
d.inProgress = true
return controllerutil.OperationResultNone, kamajierrors.MigrationInProcessError{}
default:
return "", fmt.Errorf("unexpected status %s from the migration job", res)
return controllerutil.OperationResultNone, fmt.Errorf("unexpected status %s from the migration job", res)
}
d.inProgress = true
return controllerutil.OperationResultNone, nil
}
func (d *Migrate) GetName() string {

View File

@@ -15,7 +15,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -27,7 +26,7 @@ type Config struct {
}
func (r *Config) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Storage.Config.Checksum != r.resource.GetAnnotations()[constants.Checksum] ||
return tenantControlPlane.Status.Storage.Config.Checksum != utilities.GetObjectChecksum(r.resource) ||
tenantControlPlane.Status.Storage.DataStoreName != r.DataStore.GetName()
}
@@ -70,7 +69,7 @@ func (r *Config) UpdateTenantControlPlaneStatus(_ context.Context, tenantControl
tenantControlPlane.Status.Storage.Driver = string(r.DataStore.Spec.Driver)
tenantControlPlane.Status.Storage.DataStoreName = r.DataStore.GetName()
tenantControlPlane.Status.Storage.Config.SecretName = r.resource.GetName()
tenantControlPlane.Status.Storage.Config.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Storage.Config.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -79,9 +78,9 @@ func (r *Config) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.Te
return func() error {
var password []byte
savedHash, ok := r.resource.GetAnnotations()[constants.Checksum]
hash := utilities.GetObjectChecksum(r.resource)
switch {
case ok && savedHash == utilities.CalculateMapChecksum(r.resource.Data):
case len(hash) > 0 && hash == utilities.CalculateMapChecksum(r.resource.Data):
password = r.resource.Data["DB_PASSWORD"]
default:
password = []byte(uuid.New().String())
@@ -106,21 +105,9 @@ func (r *Config) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.Te
"DB_PASSWORD": password,
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
utilities.SetObjectChecksum(r.resource, r.resource.Data)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -1,174 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package resources
import (
"context"
"fmt"
"time"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
)
// DatastoreMigrate manages the Job that copies a TenantControlPlane's data
// from its currently used DataStore to the desired one.
type DatastoreMigrate struct {
	Client client.Client
	// KamajiNamespace is the namespace in which the migration Job is created.
	KamajiNamespace string
	// KamajiServiceAccount is the ServiceAccount assigned to the Job pods.
	KamajiServiceAccount string
	// MigrateImage is the container image running the `/kamaji migrate` command.
	MigrateImage string
	// ShouldCleanUp, when true, requests removal of the migration Job.
	ShouldCleanUp bool

	// actualDatastore is the DataStore currently recorded in the TCP status.
	actualDatastore *kamajiv1alpha1.DataStore
	// desiredDatastore is the DataStore declared in the TCP spec.
	desiredDatastore *kamajiv1alpha1.DataStore
	// job is the migration Job object, defined and refreshed by Define.
	job *batchv1.Job
	// inProgress marks that a migration has been started during this reconcile.
	inProgress bool
}
// Define resolves the state required by the migration: the Job object
// (refreshed from the cluster when it already exists) and the actual and
// desired DataStore resources referenced by the TenantControlPlane.
func (d *DatastoreMigrate) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
	// No datastore recorded in the status yet: there is nothing to migrate from.
	if len(tenantControlPlane.Status.Storage.DataStoreName) == 0 {
		return nil
	}
	d.job = &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("migrate-%s-%s", tenantControlPlane.GetNamespace(), tenantControlPlane.GetName()),
			Namespace: d.KamajiNamespace,
		},
	}
	// On clean-up only the Job name and namespace are needed: skip the lookups.
	if d.ShouldCleanUp {
		return nil
	}
	// Refresh the Job from the cluster; a NotFound error is expected on the
	// first reconciliation and is deliberately ignored.
	if err := d.Client.Get(ctx, types.NamespacedName{Name: d.job.GetName(), Namespace: d.job.GetNamespace()}, d.job); err != nil {
		if !errors.IsNotFound(err) {
			return err
		}
	}
	d.actualDatastore = &kamajiv1alpha1.DataStore{}
	if err := d.Client.Get(ctx, types.NamespacedName{Name: tenantControlPlane.Status.Storage.DataStoreName}, d.actualDatastore); err != nil {
		return err
	}
	d.desiredDatastore = &kamajiv1alpha1.DataStore{}
	if err := d.Client.Get(ctx, types.NamespacedName{Name: tenantControlPlane.Spec.DataStore}, d.desiredDatastore); err != nil {
		return err
	}
	return nil
}
// ShouldCleanup reports whether the migration Job must be removed.
func (d *DatastoreMigrate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
	return d.ShouldCleanUp
}
// CleanUp deletes the migration Job when it is still present, reporting
// whether a deletion was actually performed.
func (d *DatastoreMigrate) CleanUp(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (bool, error) {
	lookupErr := d.Client.Get(ctx, types.NamespacedName{Name: d.job.GetName(), Namespace: d.job.GetNamespace()}, d.job)
	if lookupErr != nil && errors.IsNotFound(lookupErr) {
		// Already gone: nothing was cleaned up.
		return false, nil
	}

	deletionErr := d.Client.Delete(ctx, d.job)

	return deletionErr == nil, deletionErr
}
// CreateOrUpdate ensures the migration Job exists whenever the
// TenantControlPlane must move from its current DataStore to the desired one,
// and tracks its completion through the Job conditions.
func (d *DatastoreMigrate) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
	// Desired datastore not resolved (e.g. Define short-circuited): nothing to do.
	if d.desiredDatastore == nil {
		return controllerutil.OperationResultNone, nil
	}
	// Source and target datastore coincide: no migration is required.
	if d.actualDatastore.GetName() == d.desiredDatastore.GetName() {
		return controllerutil.OperationResultNone, nil
	}

	res, err := utilities.CreateOrUpdateWithConflict(ctx, d.Client, d.job, func() error {
		d.job.SetLabels(map[string]string{
			"tcp.kamaji.clastix.io/name":       tenantControlPlane.GetName(),
			"tcp.kamaji.clastix.io/namespace":  tenantControlPlane.GetNamespace(),
			"kamaji.clastix.io/component":      "migrate",
			"migrate.kamaji.clastix.io/driver": tenantControlPlane.Status.Storage.Driver,
			"migrate.kamaji.clastix.io/from":   tenantControlPlane.Status.Storage.DataStoreName,
			"migrate.kamaji.clastix.io/to":     tenantControlPlane.Spec.DataStore,
		})
		// Propagate the Job labels to the Pod template: the previous code
		// merged the template labels with themselves, which was a no-op.
		d.job.Spec.Template.ObjectMeta.Labels = utilities.MergeMaps(d.job.Spec.Template.ObjectMeta.Labels, d.job.GetLabels())
		d.job.Spec.Template.Spec.ServiceAccountName = d.KamajiServiceAccount
		d.job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
		if len(d.job.Spec.Template.Spec.Containers) == 0 {
			d.job.Spec.Template.Spec.Containers = append(d.job.Spec.Template.Spec.Containers, corev1.Container{})
		}
		d.job.Spec.Template.Spec.Containers[0].Name = "migrate"
		d.job.Spec.Template.Spec.Containers[0].Image = d.MigrateImage
		d.job.Spec.Template.Spec.Containers[0].Command = []string{"/kamaji"}
		d.job.Spec.Template.Spec.Containers[0].Args = []string{
			"migrate",
			fmt.Sprintf("--tenant-control-plane=%s/%s", tenantControlPlane.GetNamespace(), tenantControlPlane.GetName()),
			fmt.Sprintf("--target-datastore=%s", tenantControlPlane.Spec.DataStore),
		}
		// Allowing custom migrate timeout by reading TCP annotation
		annotations := tenantControlPlane.GetAnnotations()
		if v, ok := annotations["migrate.kamaji.clastix.io/timeout"]; ok {
			timeout, parseErr := time.ParseDuration(v)
			if parseErr != nil {
				log.FromContext(ctx).Error(parseErr, "cannot override default migrate timeout")
			} else {
				d.job.Spec.Template.Spec.Containers[0].Args = append(d.job.Spec.Template.Spec.Containers[0].Args, fmt.Sprintf("--timeout=%s", timeout))
			}
		}

		return nil
	})
	if err != nil {
		return res, err
	}

	switch res {
	case controllerutil.OperationResultNone:
		// The Job already existed: inspect its first condition to detect completion.
		if len(d.job.Status.Conditions) == 0 {
			break
		}
		condition := d.job.Status.Conditions[0]
		if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue {
			return controllerutil.OperationResultNone, nil
		}
		log.FromContext(ctx).Info("migration job not yet completed", "reason", condition.Reason, "message", condition.Message)
	case controllerutil.OperationResultCreated:
		break
	default:
		// Returning the typed OperationResultNone rather than a bare empty
		// string, keeping the error path consistent with the other returns.
		return controllerutil.OperationResultNone, fmt.Errorf("unexpected status %s from the migration job", res)
	}

	d.inProgress = true

	return controllerutil.OperationResultNone, nil
}
// GetName returns the resource handler identifier.
func (d *DatastoreMigrate) GetName() string {
	return "migrate"
}
// ShouldStatusBeUpdated reports whether the TenantControlPlane status must be
// refreshed, i.e. when a migration has been started during this reconcile.
func (d *DatastoreMigrate) ShouldStatusBeUpdated(context.Context, *kamajiv1alpha1.TenantControlPlane) bool {
	return d.inProgress
}
// UpdateTenantControlPlaneStatus flags the TenantControlPlane version status
// as migrating while the datastore migration Job is in progress.
func (d *DatastoreMigrate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
	if !d.inProgress {
		return nil
	}

	tenantControlPlane.Status.Kubernetes.Version.Status = &kamajiv1alpha1.VersionMigrating

	return nil
}

View File

@@ -5,6 +5,7 @@ package resources
import (
"context"
"crypto/x509"
"fmt"
corev1 "k8s.io/api/core/v1"
@@ -17,7 +18,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -30,7 +30,7 @@ type FrontProxyClientCertificate struct {
}
func (r *FrontProxyClientCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *FrontProxyClientCertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -75,7 +75,7 @@ func (r *FrontProxyClientCertificate) GetName() string {
func (r *FrontProxyClientCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -83,8 +83,21 @@ func (r *FrontProxyClientCertificate) UpdateTenantControlPlaneStatus(_ context.C
func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
// Retrieving the TenantControlPlane CA:
// this is required to trigger a new generation in case of Certificate Authority rotation.
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName}
secretCA := &corev1.Secret{}
if err := r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
if checksum := tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isCAValid, err := crypto.VerifyCertificate(r.resource.Data[kubeadmconstants.FrontProxyClientCertName], secretCA.Data[kubeadmconstants.FrontProxyCACertName], x509.ExtKeyUsageClientAuth)
if err != nil {
logger.Info(fmt.Sprintf("certificate-authority verify failed: %s", err.Error()))
}
if checksum := tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.FrontProxyClientCertName],
r.resource.Data[kubeadmconstants.FrontProxyClientKeyName],
@@ -92,7 +105,8 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
if err != nil {
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyClientCertAndKeyBaseName, err.Error()))
}
if isValid {
if isValid && isCAValid {
return nil
}
}
@@ -104,14 +118,6 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
return err
}
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName}
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
ca := kubeadm.CertificatePrivateKeyPair{
Name: kubeadmconstants.FrontProxyCACertAndKeyBaseName,
Certificate: secretCA.Data[kubeadmconstants.FrontProxyCACertName],
@@ -129,20 +135,9 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
kubeadmconstants.FrontProxyClientKeyName: certificateKeyPair.PrivateKey,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -16,7 +16,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -29,7 +28,7 @@ type FrontProxyCACertificate struct {
}
func (r *FrontProxyCACertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *FrontProxyCACertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,7 +73,7 @@ func (r *FrontProxyCACertificate) GetName() string {
func (r *FrontProxyCACertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyCA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -83,7 +82,7 @@ func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
if checksum := tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.FrontProxyCACertName],
r.resource.Data[kubeadmconstants.FrontProxyCAKeyName],
@@ -115,20 +114,9 @@ func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane
kubeadmconstants.FrontProxyCAKeyName: ca.PrivateKey,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -76,13 +76,13 @@ func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControl
DataStore: r.DataStore,
KineContainerImage: r.KineContainerImage,
}
d.SetLabels(r.resource, utilities.MergeMaps(utilities.CommonLabels(tenantControlPlane.GetName()), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
d.SetLabels(r.resource, utilities.MergeMaps(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
d.SetAnnotations(r.resource, utilities.MergeMaps(r.resource.Annotations, tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Annotations))
d.SetTemplateLabels(&r.resource.Spec.Template, r.deploymentTemplateLabels(ctx, tenantControlPlane))
d.SetNodeSelector(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetToleration(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetAffinity(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetStrategy(&r.resource.Spec)
d.SetStrategy(&r.resource.Spec, tenantControlPlane)
d.SetSelector(&r.resource.Spec, tenantControlPlane)
d.SetTopologySpreadConstraints(&r.resource.Spec, tenantControlPlane.Spec.ControlPlane.Deployment.TopologySpreadConstraints)
d.SetRuntimeClass(&r.resource.Spec.Template.Spec, tenantControlPlane)
@@ -135,7 +135,8 @@ func (r *KubernetesDeploymentResource) deploymentTemplateLabels(ctx context.Cont
}
labels = map[string]string{
"kamaji.clastix.io/soot": tenantControlPlane.GetName(),
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
"component.kamaji.clastix.io/api-server-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.APIServer.SecretName),
"component.kamaji.clastix.io/api-server-kubelet-client-certificate": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName),
"component.kamaji.clastix.io/ca": hash(ctx, tenantControlPlane.GetNamespace(), tenantControlPlane.Status.Certificates.CA.SecretName),

View File

@@ -69,7 +69,6 @@ func (r *KubernetesIngressResource) Define(_ context.Context, tenantControlPlane
ObjectMeta: metav1.ObjectMeta{
Name: tenantControlPlane.GetName(),
Namespace: tenantControlPlane.GetNamespace(),
Labels: utilities.CommonLabels(tenantControlPlane.GetName()),
},
}
@@ -80,7 +79,7 @@ func (r *KubernetesIngressResource) Define(_ context.Context, tenantControlPlane
func (r *KubernetesIngressResource) mutate(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
labels := utilities.MergeMaps(r.resource.GetLabels(), tenantControlPlane.Spec.ControlPlane.Ingress.AdditionalMetadata.Labels)
labels := utilities.MergeMaps(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()), tenantControlPlane.Spec.ControlPlane.Ingress.AdditionalMetadata.Labels)
r.resource.SetLabels(labels)
annotations := utilities.MergeMaps(r.resource.GetAnnotations(), tenantControlPlane.Spec.ControlPlane.Ingress.AdditionalMetadata.Annotations)

View File

@@ -80,14 +80,14 @@ func (r *KubernetesServiceResource) mutate(ctx context.Context, tenantControlPla
address, _ := tenantControlPlane.DeclaredControlPlaneAddress(ctx, r.Client)
return func() error {
labels := utilities.MergeMaps(utilities.CommonLabels(tenantControlPlane.GetName()), tenantControlPlane.Spec.ControlPlane.Service.AdditionalMetadata.Labels)
labels := utilities.MergeMaps(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()), tenantControlPlane.Spec.ControlPlane.Service.AdditionalMetadata.Labels)
r.resource.SetLabels(labels)
annotations := utilities.MergeMaps(r.resource.GetAnnotations(), tenantControlPlane.Spec.ControlPlane.Service.AdditionalMetadata.Annotations)
r.resource.SetAnnotations(annotations)
r.resource.Spec.Selector = map[string]string{
"kamaji.clastix.io/soot": tenantControlPlane.GetName(),
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
}
if len(r.resource.Spec.Ports) == 0 {

View File

@@ -109,13 +109,7 @@ func (r *Agent) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.T
return err
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"k8s-app": AgentName,
"addonmanager.kubernetes.io/mode": "Reconcile",
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
if r.resource.Spec.Selector == nil {
r.resource.Spec.Selector = &metav1.LabelSelector{}

View File

@@ -18,7 +18,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -30,7 +29,7 @@ type CertificateResource struct {
}
func (r *CertificateResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *CertificateResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -76,7 +75,7 @@ func (r *CertificateResource) UpdateTenantControlPlaneStatus(ctx context.Context
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Certificate.LastUpdate = metav1.Now()
tenantControlPlane.Status.Addons.Konnectivity.Certificate.SecretName = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -127,20 +126,9 @@ func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *ka
corev1.TLSPrivateKeyKey: privKey.Bytes(),
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -68,7 +68,7 @@ func (r *ClusterRoleBindingResource) Define(ctx context.Context, tenantControlPl
func (r *ClusterRoleBindingResource) CreateOrUpdate(ctx context.Context, tcp *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
if tcp.Spec.Addons.Konnectivity != nil {
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate())
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate(tcp))
}
return controllerutil.OperationResultNone, nil
@@ -93,10 +93,10 @@ func (r *ClusterRoleBindingResource) UpdateTenantControlPlaneStatus(_ context.Co
return nil
}
func (r *ClusterRoleBindingResource) mutate() controllerutil.MutateFn {
func (r *ClusterRoleBindingResource) mutate(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()),
map[string]string{
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",

View File

@@ -16,7 +16,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -65,13 +64,13 @@ func (r *EgressSelectorConfigurationResource) GetName() string {
}
func (r *EgressSelectorConfigurationResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *EgressSelectorConfigurationResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Name = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Addons.Konnectivity.ConfigMap.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -83,7 +82,7 @@ func (r *EgressSelectorConfigurationResource) UpdateTenantControlPlaneStatus(ctx
func (r *EgressSelectorConfigurationResource) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) func() error {
return func() error {
r.resource.SetLabels(utilities.MergeMaps(r.resource.GetLabels(), utilities.KamajiLabels()))
r.resource.SetLabels(utilities.MergeMaps(r.resource.GetLabels(), utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName())))
configuration := &apiserverv1alpha1.EgressSelectorConfiguration{
TypeMeta: metav1.TypeMeta{
@@ -114,11 +113,7 @@ func (r *EgressSelectorConfigurationResource) mutate(_ context.Context, tenantCo
"egress-selector-configuration.yaml": string(yamlConfiguration),
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -19,7 +19,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -29,7 +28,7 @@ type KubeconfigResource struct {
}
func (r *KubeconfigResource) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *KubeconfigResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,7 +73,7 @@ func (r *KubeconfigResource) UpdateTenantControlPlaneStatus(_ context.Context, t
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.LastUpdate = metav1.Now()
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.SecretName = r.resource.GetName()
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Addons.Konnectivity.Kubeconfig.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -88,7 +87,7 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
if checksum := tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) {
return nil
}
@@ -156,18 +155,9 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
konnectivityKubeconfigFileName: kubeconfigBytes,
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
utilities.SetObjectChecksum(r.resource, r.resource.Data)
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -69,7 +69,7 @@ func (r *ServiceAccountResource) Define(ctx context.Context, tenantControlPlane
func (r *ServiceAccountResource) CreateOrUpdate(ctx context.Context, tcp *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
if tcp.Spec.Addons.Konnectivity != nil {
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate())
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate(tcp))
}
return controllerutil.OperationResultNone, nil
@@ -94,15 +94,9 @@ func (r *ServiceAccountResource) UpdateTenantControlPlaneStatus(_ context.Contex
return nil
}
func (r *ServiceAccountResource) mutate() controllerutil.MutateFn {
func (r *ServiceAccountResource) mutate(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
return nil
}

View File

@@ -15,7 +15,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -28,7 +27,7 @@ type KubeadmConfigResource struct {
}
func (r *KubeadmConfigResource) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.KubeadmConfig.Checksum != r.resource.GetAnnotations()[constants.Checksum]
return tenantControlPlane.Status.KubeadmConfig.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *KubeadmConfigResource) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -64,7 +63,7 @@ func (r *KubeadmConfigResource) GetName() string {
func (r *KubeadmConfigResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.KubeadmConfig.LastUpdate = metav1.Now()
tenantControlPlane.Status.KubeadmConfig.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.KubeadmConfig.Checksum = utilities.GetObjectChecksum(r.resource)
tenantControlPlane.Status.KubeadmConfig.ConfigmapName = r.resource.GetName()
return nil
@@ -89,7 +88,7 @@ func (r *KubeadmConfigResource) mutate(ctx context.Context, tenantControlPlane *
return err
}
r.resource.SetLabels(utilities.KamajiLabels())
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
params := kubeadm.Parameters{
TenantControlPlaneAddress: address,
@@ -115,12 +114,7 @@ func (r *KubeadmConfigResource) mutate(ctx context.Context, tenantControlPlane *
return err
}
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
if err := ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
return err

View File

@@ -30,7 +30,7 @@ const (
)
func (d kubeadmPhase) String() string {
return [...]string{"PhaseUploadConfigKubeadm", "PhaseUploadConfigKubelet", "PhaseAddonCoreDNS", "PhaseAddonKubeProxy", "PhaseBootstrapToken"}[d]
return [...]string{"PhaseUploadConfigKubeadm", "PhaseUploadConfigKubelet", "PhaseBootstrapToken"}[d]
}
type KubeadmPhase struct {

View File

@@ -63,7 +63,7 @@ func GetKubeadmManifestDeps(ctx context.Context, client client.Client, tenantCon
if len(kubeProxy.ImageRepository) > 0 {
config.Parameters.KubeProxyOptions.Repository = kubeProxy.ImageRepository
} else {
config.Parameters.KubeProxyOptions.Repository = "k8s.gcr.io"
config.Parameters.KubeProxyOptions.Repository = "registry.k8s.io"
}
if len(kubeProxy.ImageTag) > 0 {

View File

@@ -88,7 +88,7 @@ func (r *KubeconfigResource) UpdateTenantControlPlaneStatus(ctx context.Context,
status.LastUpdate = metav1.Now()
status.SecretName = r.resource.GetName()
status.Checksum = r.resource.Annotations[constants.Checksum]
status.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -152,7 +152,7 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
return err
}
if status.Checksum == checksum && kubeadm.IsKubeconfigValid(r.resource.Data[r.KubeConfigFileName]) {
if (status.Checksum == checksum || len(r.resource.UID) > 0) && kubeadm.IsKubeconfigValid(r.resource.Data[r.KubeConfigFileName]) {
return nil
}
@@ -174,13 +174,7 @@ func (r *KubeconfigResource) mutate(ctx context.Context, tenantControlPlane *kam
r.KubeConfigFileName: kubeconfig,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
r.resource.SetAnnotations(map[string]string{
constants.Checksum: checksum,

View File

@@ -16,6 +16,10 @@ import (
"github.com/clastix/kamaji/internal/kubeadm"
)
const (
OperationResultEnqueueBack controllerutil.OperationResult = "enqueueBack"
)
type Resource interface {
Define(context.Context, *kamajiv1alpha1.TenantControlPlane) error
ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool

View File

@@ -16,7 +16,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -31,7 +30,7 @@ type SACertificate struct {
func (r *SACertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.SA.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Certificates.SA.Checksum != r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.SA.Checksum != utilities.GetObjectChecksum(r.resource)
}
func (r *SACertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -76,7 +75,7 @@ func (r *SACertificate) GetName() string {
func (r *SACertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.SA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.SA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.SA.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Certificates.SA.Checksum = utilities.GetObjectChecksum(r.resource)
return nil
}
@@ -85,7 +84,7 @@ func (r *SACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.SA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
if checksum := tenantControlPlane.Status.Certificates.SA.Checksum; len(checksum) > 0 && checksum == utilities.GetObjectChecksum(r.resource) || len(r.resource.UID) > 0 {
isValid, err := crypto.CheckPublicAndPrivateKeyValidity(r.resource.Data[kubeadmconstants.ServiceAccountPublicKeyName], r.resource.Data[kubeadmconstants.ServiceAccountPrivateKeyName])
if err != nil {
logger.Info(fmt.Sprintf("%s public_key-private_key pair is not valid: %s", kubeadmconstants.ServiceAccountKeyBaseName, err.Error()))
@@ -114,20 +113,9 @@ func (r *SACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
kubeadmconstants.ServiceAccountPrivateKeyName: sa.PrivateKey,
}
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(),
map[string]string{
"kamaji.clastix.io/name": tenantControlPlane.GetName(),
"kamaji.clastix.io/component": r.GetName(),
},
))
r.resource.SetLabels(utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()))
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
utilities.SetObjectChecksum(r.resource, r.resource.Data)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}

View File

@@ -4,5 +4,5 @@
package upgrade
const (
KubeadmVersion = "v1.26.0"
KubeadmVersion = "v1.26.1"
)

View File

@@ -7,8 +7,35 @@ import (
"crypto/md5"
"encoding/hex"
"sort"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/clastix/kamaji/internal/constants"
)
// GetObjectChecksum returns the annotation checksum in case this is set,
// otherwise, an empty string.
func GetObjectChecksum(obj client.Object) string {
v, ok := obj.GetAnnotations()[constants.Checksum]
if !ok {
return ""
}
return v
}
// SetObjectChecksum calculates the checksum for the given map and store it in the object annotations.
func SetObjectChecksum(obj client.Object, data any) {
annotations := obj.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
annotations[constants.Checksum] = CalculateMapChecksum(data)
obj.SetAnnotations(annotations)
}
// CalculateMapChecksum orders the map according to its key, and calculating the overall md5 of the values.
// It's expected to work with ConfigMap (map[string]string) and Secrets (map[string][]byte).
func CalculateMapChecksum(data any) string {
@@ -36,7 +63,7 @@ func calculateMapStringString(data map[string]string) string {
checksum += data[key]
}
return MD5Checksum([]byte(checksum))
return md5Checksum([]byte(checksum))
}
func calculateMapStringByte(data map[string][]byte) string {
@@ -53,10 +80,10 @@ func calculateMapStringByte(data map[string][]byte) string {
checksum += string(data[key])
}
return MD5Checksum([]byte(checksum))
return md5Checksum([]byte(checksum))
}
func MD5Checksum(value []byte) string {
func md5Checksum(value []byte) string {
hash := md5.Sum(value)
return hex.EncodeToString(hash[:])

View File

@@ -18,16 +18,11 @@ const (
separator = "-"
)
func KamajiLabels() map[string]string {
func KamajiLabels(tcpName, resourceName string) map[string]string {
return map[string]string{
constants.ProjectNameLabelKey: constants.ProjectNameLabelValue,
}
}
func CommonLabels(clusterName string) map[string]string {
return map[string]string{
"kamaji.clastix.io/type": "cluster",
"kamaji.clastix.io/cluster": clusterName,
constants.ProjectNameLabelKey: constants.ProjectNameLabelValue,
constants.ControlPlaneLabelKey: tcpName,
constants.ControlPlaneLabelResource: resourceName,
}
}