Mirror of https://github.com/clastix/kamaji.git, synced 2026-02-19 20:39:51 +00:00
Compare commits
80 Commits
Commit SHAs (author, date, and message columns were not captured by this mirror):

aed48e1bf0, 0037e6e689, 56071434e6, 2d39c9ab0b, b2fbb52361, a2236e76cf, b1ea75f9c0, 6aea80ce45, 5ebe123994, d1910cd389, 203e168397, b29a79da36, 5ec586960f, 90aef60c18, 9ce8da0b37, 9d73905965, 32383be1d0, 6ffd6bbdfd, b7169215ae, f8a0206785, 1d548665ee, 37616865b4, d31b3eab0a, 28a098af21, a849a84fd0, bbfec75e7f, ced34a50e6, 1311220b94, 0e57b32ebc, 4753c8ac8d, b99639c9fa, f3d95add5b, dc3d5060ca, 06a55f6a70, 7a160cdb74, 9688d288b7, 87c7c984de, e5cccfe88b, 197518b0b4, 7ac8e5e539, cec4f9136d, 4299b72d7f, eff68db336, 74a6eb6b80, 21fe27935f, e3a8ff90da, 8e6cea2d2d, 1c90a4f333, 6123d9a5a4, 587d3bb24e, 4465bd8449, cf1f2763f6, 25dc19f839, 1ccc1d1b1e, 1d96710890, edceda3302, 755cc5bacd, e0c86d685c, ddb700f4f0, 4bdddfc695, 8b999f1323, 2571086ff3, cd9d92296b, f24ff618a9, 4bf39149ec, 045c5bbd7c, 6eb3171817, 289bad540c, ac06447706, 95de31d697, 0037b4941c, c251f57f06, 129cb0e6fe, 73e0618ad3, 6c2634b5e9, dac670113f, c8039cdf5c, a7cfc9a898, 0f1a4f28de, 40f57466e2
.github/workflows/ci.yaml (vendored): 6 lines changed
@@ -14,12 +14,12 @@ jobs:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '1.19'
go-version: '1.22'
check-latest: true
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3.2.0
with:
version: v1.49.0
version: v1.54.2
only-new-issues: false
args: --timeout 5m --config .golangci.yml
diff:
@@ -31,7 +31,7 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@v3
with:
go-version: '1.19'
go-version: '1.22'
check-latest: true
- run: make yaml-installation-file
- name: Checking if YAML installer file is not aligned
.github/workflows/e2e.yaml (vendored): 2 lines changed
@@ -38,7 +38,7 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@v3
with:
go-version: '1.19'
go-version: '1.22'
check-latest: true
- run: |
sudo apt-get update
.gitignore (vendored): 5 lines changed
@@ -32,5 +32,8 @@ bin
**/*.key
**/*.pem
**/*.csr
**/server-csr.json
.DS_Store

**/server-csr.json
!deploy/kine/mysql/server-csr.json
!deploy/kine/nats/server-csr.json
@@ -11,6 +11,7 @@ linters-settings:

linters:
disable:
- depguard
- wrapcheck
- gomnd
- scopelint
@@ -36,6 +37,8 @@ linters:
- funlen
- dupl
- cyclop
- gocognit
- nestif
# deprecated linters
- deadcode
- golint
ADOPTERS.md (new file): 27 lines
@@ -0,0 +1,27 @@
# Adopters

This is a list of companies that have adopted Kamaji.
Feel free to open a Pull-Request to get yours listed.

### Adopter list (alphabetically)

| Type | Name | Since | Website | Use-Case |
|:-|:-|:-|:-|:-|
| End-user | Sicuro Tech Lab | 2024 | [link](https://sicurotechlab.it/) | Sicuro Tech Lab offers cloud infrastructure for Web Agencies and uses kamaji to provide managed k8s services. |
| R&D | TIM | 2024 | [link](https://www.gruppotim.it) | TIM is an Italian telecommunications company using Kamaji for experimental research and development purposes. |
| End-user | KINX | 2024 | [link](https://kinx.net/?lang=en) | KINX is an Internet infrastructure service provider and will use kamaji for its new [Managed Kubernetes Service](https://kinx.net/service/cloud/kubernetes/intro/?lang=en). |
| End-user | sevensphere | 2023 | [link](https://www.sevensphere.io) | Sevensphere provides consulting services for end-user companies / cloud providers and uses Kamaji for designing cloud/on-premises Kubernetes-as-a-Service platform. |
| Vendor | Ænix | 2023 | [link](https://aenix.io/) | Ænix provides consulting services for cloud providers and uses Kamaji for running Kubernetes-as-a-Service in free PaaS platform [Cozystack](https://cozystack.io). |
| Vendor | Netsons | 2023 | [link](https://www.netsons.com) | Netsons is an Italian hosting and cloud provider and uses Kamaji in its [Managed Kubernetes](https://www.netsons.com/kubernetes) offering. |
| Vendor | Aknostic | 2023 | [link](https://aknostic.com) | Aknostic is a cloud-native consultancy company using Kamaji to build a Kubernetes based PaaS. |

### Adopter Types

**End-user**: The organization runs Kamaji in production in some way.

**Integration**: The organization has a product that integrates with Kamaji, but does not contain Kamaji.

**Vendor**: The organization packages Kamaji in their product and sells it as part of their product.

**R&D**: Company that exploring innovative technologies and solutions for research and development purposes.
@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.19 as builder
FROM golang:1.22 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
Makefile: 22 lines changed
@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.3.4
VERSION ?= 0.6.0

# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -85,11 +85,11 @@ kind: ## Download kind locally if necessary.

CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.4)
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0)

GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2)

KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
@@ -132,7 +132,11 @@ datastore-postgres:
$(MAKE) NAME=gold _datastore-postgres

_datastore-etcd:
$(HELM) upgrade --install etcd-$(NAME) clastix/kamaji-etcd --create-namespace -n etcd-system --set datastore.enabled=true
$(HELM) upgrade --install etcd-$(NAME) clastix/kamaji-etcd --create-namespace -n etcd-system --set datastore.enabled=true --set fullnameOverride=etcd-$(NAME)

_datastore-nats:
$(MAKE) NAME=$(NAME) NAMESPACE=nats-system -C deploy/kine/nats nats
kubectl apply -f $(shell pwd)/config/samples/kamaji_v1alpha1_datastore_nats_$(NAME).yaml

datastore-etcd: helm
$(HELM) repo add clastix https://clastix.github.io/charts
@@ -141,7 +145,15 @@ datastore-etcd: helm
$(MAKE) NAME=silver _datastore-etcd
$(MAKE) NAME=gold _datastore-etcd

datastores: datastore-mysql datastore-etcd datastore-postgres ## Install all Kamaji DataStores with multiple drivers, and different tiers.
datastore-nats: helm
$(HELM) repo add nats https://nats-io.github.io/k8s/helm/charts/
$(HELM) repo update
$(MAKE) NAME=bronze _datastore-nats
$(MAKE) NAME=silver _datastore-nats
$(MAKE) NAME=gold _datastore-nats
$(MAKE) NAME=notls _datastore-nats

datastores: datastore-mysql datastore-etcd datastore-postgres datastore-nats ## Install all Kamaji DataStores with multiple drivers, and different tiers.

##@ Build
README.md: 151 lines changed
@@ -3,45 +3,158 @@
<p align="left">
<img src="https://img.shields.io/github/license/clastix/kamaji"/>
<img src="https://img.shields.io/github/go-mod/go-version/clastix/kamaji"/>
<a href="https://github.com/clastix/kamaji/releases">
<img src="https://img.shields.io/github/v/release/clastix/kamaji"/>
<img src="https://goreportcard.com/badge/github.com/clastix/kamaji">
</a>
<a href="https://github.com/clastix/kamaji/releases"><img src="https://img.shields.io/github/v/release/clastix/kamaji"/></a>
<img src="https://goreportcard.com/badge/github.com/clastix/kamaji">
<a href="https://kubernetes.slack.com/archives/C03GLTTMWNN"><img alt="#kamaji on Kubernetes Slack" src="https://img.shields.io/badge/slack-@kubernetes/kamaji-blue.svg?logo=slack"/></a>
</p>




**Kamaji** is a **Kubernetes Control Plane Manager**. It operates Kubernetes at scale with a fraction of the operational burden. Kamaji is special because the Control Plane components are running inside pods instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.
### 🤔 What is Kamaji?

<img src="docs/content/images/architecture.png" width="600">
**Kamaji** is a **Kubernetes Control Plane Manager** leveraging on the concept of [**Hosted Control Plane**](https://clastix.io/post/the-raise-of-hosted-control-plane-in-kubernetes/).

## Main Features
Kamaji's approach is based on running the Kubernetes Control Plane components in Pods instead of dedicated machines.
This allows operating Kubernetes clusters at scale, with a fraction of the operational burden.
Thanks to this approach, running multiple Control Planes can be cheaper and easier to deploy and operate.

- **Multi-cluster Management:** centrally manage multiple Kubernetes clusters from a single Management Cluster.
_Kamaji is like a fleet of Site Reliability Engineers with expertise codified into its logic, working 24/7 to keep up and running your Control Planes._

<img src="docs/content/images/architecture.png" width="600" style="display: block; margin: 0 auto">

### 📖 How it works

Kamaji is extending the Kubernetes API capabilities thanks to [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions).

By installing Kamaji, two pairs of new APIs will be available:

- `TenantControlPlane`, the instance definition of your desired Kubernetes Control Plane
- `Datastore`, the backing store used by one (or more) `TenantControlPlane`

The `TenantControlPlane` (short-named as `tcp`) objects are Namespace-scoped and allows configuring every aspect of your desired Control Plane.
Besides the Kubernetes configuration values, you can specify the Pod options such as limit, request, tolerations, node selector, etc.,
as well as how these should be exposed (e.g.: using a `ClusterIP`, a `LoadBalancer`, or a `NodePort`).

The `TenantControlPlane` is the stateless definition of the Control Plane allowing to set up the required components for a full-fledged Kubernetest cluster.
The state is managed by the `Datastore` API, a cluster-scoped resource which can hold the data of one or more Kubernetes clusters.

> For further information about the API specifications and all the available options,
> refer to the official [API reference](https://kamaji.clastix.io/reference/api/#tenantcontrolplane).

### ⭐️ Main features

- **Fast provisioning time**: depending on the infrastructure, Tenant Control Planes are up and ready to serve traffic in **16 seconds**.
- **Streamlined update**: the rollout to a new Kubernetes version for a given Tenant Control Plane takes just **10 seconds**, with a Blue/Green deployment to avoid serving mixed Kubernetes versions.
- **Resource optimization**: thanks to the Datastore decoupling, there's no need of odd number instances (e.g.: RAFT consensus) by allowing to save up to 60% of HW resources.
- **Scale from zero to the moon**: scale down the instance when there's no usage, or automatically scale to support the traffic spikes reusing the Kubernetes patterns.
- **Declarative approach, constant reconciliation**: thanks to the Operator pattern, drift detection happens in real-time, maintaining the desired state.
- **Automated certificates management**: Kamaji leverages on `kubeadm` and the certificates are automatically created and rotated for you.
- **Managing core addons**: Kamaji allows configuring automatically `kube-proxy`, `CoreDNS`, and `konnectivity`, with automatic remediation in case of user errors (e.g.: deleting the `CoreDNS` deployment).
- **Auto Healing**: the `TenantControlPlane` objects in the management cluster are tracked by Kamaji, in case of deletion of those, everything is created in an idempotent way.
- **Datastore multi-tenancy**: optionally, Kamaji allows running multiple Control Planes on the same _Datastore_ instance leveraging on the multi-tenancy of each driver, decreasing operations and optimizing costs.
- **Overcoming `etcd` limitations**: optionally, Kamaji allows using a different _Datastore_ thanks to [`kine`](https://github.com/k3s-io/kine) by supporting `MySQL`, `PostgreSQL`, or `NATS` as an alternative.
- **Simplifying mixed-networks setup**: thanks to [`Konnectivity`](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/),
the Tenant Control Plane is connected to the worker nodes hosted in a different network, overcoming the no-NAT availability when dealing with nodes with a non routable IP address
(e.g.: worker nodes in a different infrastructure).

### 🚀 Use cases

- [**Creating a private Managed Kubernetes Service**](https://clastix.io/post/netsons-builds-a-managed-kubernetes-service-with-kamaji-and-open-stack/)
- [**Building a Platform as a Service**](https://aenix.io/cozystack/)
- [**Overcoming public Managed Kubernetes Services**](https://clastix.io/post/overcoming-eks-limitations-with-kamaji-on-aws/) such as EKS
- [**Hybrid infrastructures**](https://clastix.io/post/bridging-the-gap-hybrid-kubernetes-clusters-with-remote-control-planes/):
host the Control Plane on the Cloud and worker nodes on prem or vice-versa, according to your needs.
- [**Kubernetes at the edge**](https://clastix.io/post/edgevolution-unleashing-the-power-of-kubernetes-clusters-for-a-revolutionary-edge-computing-experience/):
take full advantage of the _Kubernetes API Server as a service_ paradigm.
- **Kubernetes Control Plane as a Service:** centrally manage multiple Kubernetes clusters from a single management point (_Multi-Cluster management_).
- **High-density Control Plane:** place multiple control planes on the same infrastructure, instead of having dedicated machines for each control plane.
- **Strong Multi-tenancy:** leave users to access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes with automation, high-availability, fault tolerance, and autoscaling out of the box.
- **Bring Your Own Device:** keep the control plane isolated from data plane. Worke nodes can join and run consistently everywhere: cloud, edge, and data-center.
- **Bring Your Own Device:** keep the control plane isolated from data plane. Worker nodes can join and run consistently from everywhere: cloud, edge, and data-center.
- **Full CNCF compliant:** all clusters are built with upstream Kubernetes binaries, resulting in full CNCF compliant Kubernetes clusters.

## Roadmap
> 🤔 You'd like to do the same but don't know how?
> 💡 [CLASTIX](https://clastix.io/) can help you with your needs!

### 🧑‍💻 Production grade

Kamaji is empowering several businesses, and it counts public adopters.
Check out the [adopters](./ADOPTERS.md) file to learn more.

> 🤗 If you're using Kamaji, share your love by opening a PR!

### 🍦 Vanilla Kubernetes clusters

Kamaji is **not** yet-another-Kubernetes distribution: you have full freedom on the technology stack to provide to end users.
Kamaji is a perfect fit for Platform Engineering, hiding the complexity of the Control Plane management to developers and DevOps engineers.

The provided Kubernetes Control Planes are [CNCF compliant clusters](https://kamaji.clastix.io/reference/conformance/).

<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" style="display: block; width: 75px; margin: 0 auto">

### 🐢 Cluster API support

Kamaji is **not** a [Cluster API](https://cluster-api.sigs.k8s.io/) replacement, rather, it plays very well with it.

Since Kamaji is just focusing on the Control Plane a [Kamaji's Cluster API Control Plane provider](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been developed.

### 🛣️ Roadmap

- [x] Dynamic address on Load Balancer
- [x] Zero Downtime Tenant Control Plane upgrade
- [x] Join worker nodes from anywhere
- [x] Alternative datastore MySQL and PostgreSQL
- [x] Pool of multiple datastores
- [x] Seamless migration between datastores
- [x] [Join worker nodes from anywhere thanks to Konnectivity](https://kamaji.clastix.io/concepts/#konnectivity)
- [x] [Alternative datastore MySQL, PostgreSQL, NATS](https://kamaji.clastix.io/guides/alternative-datastore/)
- [x] [Pool of multiple datastores](https://kamaji.clastix.io/concepts/#datastores)
- [x] [Seamless migration between datastores](https://kamaji.clastix.io/guides/datastore-migration/)
- [ ] Automatic assignment to a datastore
- [ ] Autoscaling of Tenant Control Plane
- [x] Provisioning through Cluster APIs
- [x] [Provisioning through Cluster APIs](https://github.com/clastix/cluster-api-control-plane-provider-kamaji)
- [ ] Terraform provider
- [ ] Custom Prometheus metrics

### 🎥 Multimedia

## Documentation
Please, check the project's [documentation](https://kamaji.clastix.io/) for getting started with Kamaji.
- Playlist ▶️ [Tutorials and How-Tos by Dario Tranchitella, CLASTIX](https://www.youtube.com/playlist?list=PLjiUjoV4Ws_3pNsUpTXI-KKk731nD2MQY)
- YouTube ▶️ [Metal³ provisioning with Kamaji Hosted Control Planes by Huy Mai, Ericsson](https://youtu.be/u9sbURj6jXY?t=10536)
- YouTube ▶️ [Hands-on introduction to Kamaji](https://www.youtube.com/watch?v=HhevxwQWQ88)
- YouTube ▶️ [Scaling Kubernetes up to 1,000 Control Planes](https://www.youtube.com/watch?v=W_HXRXJh96U)
- YouTube ▶️ [Equinix, Kamaji, and Cluster API](https://www.youtube.com/watch?v=TLBTqROj_wA)
- YouTube ▶️ [Rancher & Kamaji: solving multitenancy challenges in the Kubernetes world](https://www.youtube.com/watch?v=VXHNrMmlF8U)
- YouTube ▶️ [Enabling Self-Service Kubernetes clusters with Kamaji and Paralus](https://www.youtube.com/watch?v=JWA2LwZazM0)

## Contributions
Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
### 🏷️ Versioning

Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
A full list of the available releases is available in the GitHub repository's [**Release** section](https://github.com/clastix/kamaji/releases).

### 📄 Documentation

Further documentation can be found on the official [Kamaji documentation website](https://kamaji.clastix.io/).

### 🤝 Contributions

Contributions are highly appreciated and very welcomed!

In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/clastix/kamaji/issues) section.
In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.

You can express your intention in working on the fix on your own.
The commit messages are checked according to the described [semantics](https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md#semantics).
Commits are used to generate the changelog, and their author will be referenced in it.

In case of **✨ Feature Requests** please use the [Discussion's Feature Request section](https://github.com/clastix/kamaji/discussions/categories/feature-requests).

### 📝 License

The Kamaji Cluster API Control Plane provider is licensed under Apache 2.0.
The code is provided as-is with no warranties.

### 🛟 Commercial Support

 [CLASTIX](https://clastix.io/) is the commercial company behind Kamaji and the Cluster API Control Plane provider.

If you're looking to run Kamaji in production and would like to learn more, **CLASTIX** can help by offering [Open Source support plans](https://clastix.io/support),
as well as providing a comprehensive Enterprise Platform named [CLASTIX Enterprise Platform](https://clastix.cloud/), built on top of the Kamaji and [Capsule](https://capsule.clastix.io/) project (now donated to CNCF as a Sandbox project).

Feel free to get in touch with the provided [Contact form](https://clastix.io/contact).
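To make the two APIs described in the README concrete, here is a minimal, illustrative sketch of a cluster-scoped `DataStore` paired with a namespaced `TenantControlPlane`. The exact nesting of the TenantControlPlane fields (`controlPlane.deployment`, `controlPlane.service`, `kubernetes.version`) and all values are assumptions for illustration only; the authoritative schema is the API reference linked above.

```yaml
# Hypothetical example: names, versions, and some field nesting are assumptions.
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: default                  # cluster-scoped; can back one or more Tenant Control Planes
spec:
  driver: etcd                   # etcd, MySQL, PostgreSQL, or NATS
  endpoints:
    - etcd-0.etcd-system.svc.cluster.local:2379   # bare FQDN and port, no protocol
---
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: tenants             # TenantControlPlane (tcp) objects are Namespace-scoped
spec:
  dataStore: default             # the DataStore holding this cluster's state
  controlPlane:
    deployment:
      replicas: 2
    service:
      serviceType: LoadBalancer  # ClusterIP, NodePort, or LoadBalancer
  kubernetes:
    version: v1.29.0
```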
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:validation:Enum=etcd;MySQL;PostgreSQL
// +kubebuilder:validation:Enum=etcd;MySQL;PostgreSQL;NATS

type Driver string

@@ -16,6 +16,7 @@ var (
EtcdDriver Driver = "etcd"
KineMySQLDriver Driver = "MySQL"
KinePostgreSQLDriver Driver = "PostgreSQL"
KineNatsDriver Driver = "NATS"
)

// +kubebuilder:validation:MinItems=1
@@ -33,7 +34,8 @@ type DataStoreSpec struct {
// This value is optional.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// Defines the TLS/SSL configuration required to connect to the data store in a secure way.
TLSConfig TLSConfig `json:"tlsConfig"`
// This value is optional.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
}

// TLSConfig contains the information used to connect to the data store using a secured connection.
@@ -42,7 +44,7 @@ type TLSConfig struct {
// The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
CertificateAuthority CertKeyPair `json:"certificateAuthority"`
// Specifies the SSL/TLS key and private key pair used to connect to the data store.
ClientCertificate ClientCertificate `json:"clientCertificate"`
ClientCertificate *ClientCertificate `json:"clientCertificate,omitempty"`
}

type ClientCertificate struct {
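Since `TLSConfig` becomes a pointer and is no longer mandatory, a DataStore can now be declared without any TLS material, for example with the new NATS driver (matching the `notls` tier added to the Makefile above). This is a hedged sketch only: the endpoint, Secret names, and keys are placeholders.

```yaml
# Hypothetical sketch: tlsConfig is omitted, which is only valid after this change.
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: nats-notls
spec:
  driver: NATS
  endpoints:
    - nats.nats-system.svc.cluster.local:4222
  basicAuth:
    username:
      secretReference:
        name: nats-credentials    # placeholder Secret
        namespace: nats-system
        keyPath: username
    password:
      secretReference:
        name: nats-credentials
        namespace: nats-system
        keyPath: password
```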
@@ -43,20 +43,22 @@ func (d *DatastoreUsedSecret) ExtractValue() client.IndexerFunc {
}
}

if ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef))
}
if ds.Spec.TLSConfig != nil {
if ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef))
}

if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil && ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef))
}
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil && ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef))
}

if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
}
if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
}

if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
}
}

return res
@@ -138,9 +138,12 @@ type DeploymentSpec struct {
// (kube-apiserver, controller-manager, and scheduler).
Resources *ControlPlaneComponentsResources `json:"resources,omitempty"`
// ExtraArgs allows adding additional arguments to the Control Plane components,
// such as kube-apiserver, controller-manager, and scheduler.
ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
// such as kube-apiserver, controller-manager, and scheduler. WARNING - This option
// can override existing parameters and cause components to misbehave in unxpected ways.
// Only modify if you know what you are doing.
ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
PodAdditionalMetadata AdditionalMetadata `json:"podAdditionalMetadata,omitempty"`
// AdditionalInitContainers allows adding additional init containers to the Control Plane deployment.
AdditionalInitContainers []corev1.Container `json:"additionalInitContainers,omitempty"`
// AdditionalContainers allows adding additional containers to the Control Plane deployment.
@@ -150,6 +153,9 @@ type DeploymentSpec struct {
// AdditionalVolumeMounts allows to mount an additional volume into each component of the Control Plane
// (kube-apiserver, controller-manager, and scheduler).
AdditionalVolumeMounts *AdditionalVolumeMounts `json:"additionalVolumeMounts,omitempty"`
// +kubebuilder:default="default"
// ServiceAccountName allows to specify the service account to be mounted to the pods of the Control plane deployment
ServiceAccountName string `json:"serviceAccountName,omitempty"`
}

// AdditionalVolumeMounts allows mounting additional volumes to the Control Plane components.
@@ -189,6 +195,9 @@ type ImageOverrideTrait struct {
}

// ExtraArgs allows adding additional arguments to said component.
// WARNING - This option can override existing konnectivity
// parameters and cause konnectivity components to misbehave in
// unxpected ways. Only modify if you know what you are doing.
type ExtraArgs []string

type KonnectivityServerSpec struct {
@@ -211,8 +220,12 @@ type KonnectivityAgentSpec struct {
Image string `json:"image,omitempty"`
// Version for Konnectivity agent.
// +kubebuilder:default=v0.0.32
Version string `json:"version,omitempty"`
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
Version string `json:"version,omitempty"`
// Tolerations for the deployed agent.
// Can be customized to start the konnectivity-agent even if the nodes are not ready or tainted.
// +kubebuilder:default={{key: "CriticalAddonsOnly", operator: "Exists"}}
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
}

// KonnectivitySpec defines the spec for Konnectivity.
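The new API fields above surface in a TenantControlPlane manifest roughly as in the following sketch. The nesting of the konnectivity agent settings under `spec.addons.konnectivity` is an assumption, and all values are placeholders; consult the API reference for the actual schema.

```yaml
# Illustrative fragment of a TenantControlPlane spec using the newly added fields.
spec:
  controlPlane:
    deployment:
      serviceAccountName: tenant-cp      # new; defaults to "default"
      podAdditionalMetadata:             # new; metadata applied to the Pods only
        labels:
          team: platform
      additionalInitContainers:          # new; extra init containers for the Control Plane Deployment
        - name: wait-for-datastore
          image: busybox:1.36
          command: ["sh", "-c", "sleep 5"]
  addons:
    konnectivity:                        # nesting assumed; see the API reference
      agent:
        tolerations:                     # new; defaults to CriticalAddonsOnly/Exists
          - key: CriticalAddonsOnly
            operator: Exists
        extraArgs:                       # per the warning above, can override existing parameters
          - --v=4
```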
@@ -28,9 +28,10 @@ func (c CGroupDriver) String() string {
}

const (
ServiceTypeLoadBalancer = (ServiceType)(corev1.ServiceTypeLoadBalancer)
ServiceTypeClusterIP = (ServiceType)(corev1.ServiceTypeClusterIP)
ServiceTypeNodePort = (ServiceType)(corev1.ServiceTypeNodePort)
ServiceTypeLoadBalancer = (ServiceType)(corev1.ServiceTypeLoadBalancer)
ServiceTypeClusterIP = (ServiceType)(corev1.ServiceTypeClusterIP)
ServiceTypeNodePort = (ServiceType)(corev1.ServiceTypeNodePort)
KubeconfigSecretKeyAnnotation = "kamaji.clastix.io/kubeconfig-secret-key"
)

// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
@@ -526,7 +525,11 @@ func (in *DataStoreSpec) DeepCopyInto(out *DataStoreSpec) {
*out = new(BasicAuth)
(*in).DeepCopyInto(*out)
}
in.TLSConfig.DeepCopyInto(&out.TLSConfig)
if in.TLSConfig != nil {
in, out := &in.TLSConfig, &out.TLSConfig
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreSpec.
@@ -578,6 +581,11 @@ func (in *DatastoreUsedSecret) DeepCopy() *DatastoreUsedSecret {
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
out.RegistrySettings = in.RegistrySettings
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -616,6 +624,7 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
(*in).DeepCopyInto(*out)
}
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
in.PodAdditionalMetadata.DeepCopyInto(&out.PodAdditionalMetadata)
if in.AdditionalInitContainers != nil {
in, out := &in.AdditionalInitContainers, &out.AdditionalInitContainers
*out = make([]v1.Container, len(*in))
@@ -775,6 +784,13 @@ func (in *IngressSpec) DeepCopy() *IngressSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KonnectivityAgentSpec) DeepCopyInto(out *KonnectivityAgentSpec) {
*out = *in
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(ExtraArgs, len(*in))
@@ -1196,7 +1212,11 @@ func (in *StorageStatus) DeepCopy() *StorageStatus {
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
*out = *in
in.CertificateAuthority.DeepCopyInto(&out.CertificateAuthority)
in.ClientCertificate.DeepCopyInto(&out.ClientCertificate)
if in.ClientCertificate != nil {
in, out := &in.ClientCertificate, &out.ClientCertificate
*out = new(ClientCertificate)
(*in).DeepCopyInto(*out)
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
@@ -1,22 +1,24 @@
apiVersion: v2
appVersion: v0.3.4
description: Kamaji is a Kubernetes Control Plane Manager.
appVersion: v0.6.0
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
kubeVersion: ">=1.21.0-0"
maintainers:
- email: dario@tranchitella.eu
name: Dario Tranchitella
url: https://clastix.io
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
url: https://clastix.io
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.12.5
version: 0.15.3
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji
catalog.cattle.io/display-name: Kamaji
catalog.cattle.io/display-name: Kamaji
@@ -1,16 +1,16 @@
# kamaji

  
  

Kamaji is a Kubernetes Control Plane Manager.
Kamaji is the Hosted Control Plane Manager for Kubernetes.

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Dario Tranchitella | <dario@tranchitella.eu> | <https://clastix.io> |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
| Adriano Pezzuto | <me@bsctl.io> | <https://clastix.io> |

## Source Code
@@ -66,6 +66,8 @@ Here the values you can override:
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
| cfssl.image.repository | string | `"cfssl/cfssl"` | |
| cfssl.image.tag | string | `"latest"` | |
| datastore.basicAuth.passwordSecret.keyPath | string | `nil` | The Secret key where the data is stored. |
| datastore.basicAuth.passwordSecret.name | string | `nil` | The name of the Secret containing the password used to connect to the relational database. |
| datastore.basicAuth.passwordSecret.namespace | string | `nil` | The namespace of the Secret containing the password used to connect to the relational database. |
@@ -73,8 +75,9 @@ Here the values you can override:
| datastore.basicAuth.usernameSecret.name | string | `nil` | The name of the Secret containing the username used to connect to the relational database. |
| datastore.basicAuth.usernameSecret.namespace | string | `nil` | The namespace of the Secret containing the username used to connect to the relational database. |
| datastore.driver | string | `"etcd"` | (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd). |
| datastore.enabled | bool | `true` | (bool) Enable the Kamaji Datastore creation (default=true) |
| datastore.endpoints | list | `[]` | (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically. |
| datastore.nameOverride | string | `nil` | The Datastore name override, if empty defaults to `default` |
| datastore.nameOverride | string | `nil` | The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to. |
| datastore.tlsConfig.certificateAuthority.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. |
| datastore.tlsConfig.certificateAuthority.certificate.name | string | `nil` | Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
| datastore.tlsConfig.certificateAuthority.certificate.namespace | string | `nil` | Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
@@ -87,6 +90,7 @@ Here the values you can override:
| datastore.tlsConfig.clientCertificate.privateKey.keyPath | string | `nil` | Key of the Secret which contains the content of the private key. |
| datastore.tlsConfig.clientCertificate.privateKey.name | string | `nil` | Name of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore. |
| datastore.tlsConfig.clientCertificate.privateKey.namespace | string | `nil` | Namespace of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore. |
| datastore.tlsConfig.enabled | bool | `true` | |
| etcd.compactionInterval | int | `0` | ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled)) |
| etcd.deploy | bool | `true` | Install an etcd with enabled multi-tenancy along with Kamaji |
| etcd.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/coreos/etcd","tag":"v3.5.6"}` | Install specific etcd image |
@@ -100,10 +104,11 @@ Here the values you can override:
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| etcd.persistence.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
| etcd.persistence.size | string | `"10Gi"` | |
| etcd.persistence.storageClass | string | `""` | |
| etcd.persistence.storageClassName | string | `""` | |
| etcd.port | int | `2379` | The client request port. |
| etcd.serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
| etcd.serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
| etcd.tolerations | list | `[]` | (array) Kubernetes affinity rules to apply to Kamaji etcd pods |
| extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |
| fullnameOverride | string | `""` | |
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
@@ -30,10 +30,19 @@ spec:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -41,25 +50,33 @@ spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: In case of authentication enabled for the given data store, specifies the username and password pair. This value is optional.
|
||||
description: |-
|
||||
In case of authentication enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to reference
|
||||
a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -69,20 +86,26 @@ spec:
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to reference
|
||||
a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -99,36 +122,49 @@ spec:
|
||||
- etcd
|
||||
- MySQL
|
||||
- PostgreSQL
|
||||
- NATS
|
||||
type: string
|
||||
endpoints:
|
||||
description: List of the endpoints to connect to the shared datastore. No need for protocol, just bare IP/FQDN and port.
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
minItems: 1
|
||||
type: array
|
||||
tlsConfig:
|
||||
description: Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
description: |-
|
||||
Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
This value is optional.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference. The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to
|
||||
reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -138,20 +174,26 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to
|
||||
reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -162,25 +204,32 @@ spec:
|
||||
- certificate
|
||||
type: object
|
||||
clientCertificate:
|
||||
description: Specifies the SSL/TLS key and private key pair used to connect to the data store.
|
||||
description: Specifies the SSL/TLS key and private key pair used
|
||||
to connect to the data store.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to
|
||||
reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -190,20 +239,26 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
description: name is unique within a namespace to
|
||||
reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
description: namespace defines the space within which
|
||||
the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
@@ -216,18 +271,17 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- certificateAuthority
|
||||
- clientCertificate
|
||||
type: object
|
||||
required:
|
||||
- driver
|
||||
- endpoints
|
||||
- tlsConfig
|
||||
type: object
|
||||
status:
|
||||
description: DataStoreStatus defines the observed state of DataStore.
|
||||
properties:
|
||||
usedBy:
|
||||
description: List of the Tenant Control Planes, namespaced named, using this data store.
|
||||
description: List of the Tenant Control Planes, namespaced named,
|
||||
using this data store.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,7 +2,11 @@
Create a default fully qualified datastore name.
*/}}
{{- define "datastore.fullname" -}}
{{- if .Values.datastore.enabled }}
{{- default "default" .Values.datastore.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- required "A valid .Values.datastore.nameOverride required!" .Values.datastore.nameOverride }}
{{- end }}
{{- end }}

{{/*
@@ -1,3 +1,4 @@
{{- if .Values.datastore.enabled}}
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
@@ -19,8 +20,14 @@ spec:
secretReference:
{{- .Values.datastore.basicAuth.passwordSecret | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.datastore.tlsConfig.enabled }}
tlsConfig:
certificateAuthority:
{{- include "datastore.certificateAuthority" . | indent 6 }}

{{- if .Values.datastore.tlsConfig.clientCertificate }}
clientCertificate:
{{- include "datastore.clientCertificate" . | indent 6 }}
{{- end }}
{{- end}}
{{- end}}
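Together with the `datastore.fullname` helper change, these template guards let the chart skip creating a DataStore and point Kamaji at an existing one instead. A hedged sketch of the relevant Helm values (the keys are the ones documented in the chart README rows above; the name is a placeholder):

```yaml
datastore:
  enabled: false                      # do not render the DataStore resource
  nameOverride: company-shared-etcd   # required when enabled=false: the existing DataStore to connect to
  # with enabled=true (the default), nameOverride falls back to "default",
  # and tlsConfig.enabled controls whether the tlsConfig block is rendered at all
```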
@@ -30,11 +30,15 @@ spec:
- bash
- -c
- |-
etcdctl member list -w table &&
etcdctl user add --no-password=true root &&
etcdctl role add root &&
etcdctl user grant-role root root &&
etcdctl auth enable
etcdctl member list -w table
if etcdctl user get root &>/dev/null; then
echo "User already exists, nothing to do"
else
etcdctl user add --no-password=true root &&
etcdctl role add root &&
etcdctl user grant-role root root &&
etcdctl auth enable
fi
env:
- name: ETCDCTL_ENDPOINTS
value: https://etcd-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:2379
@@ -19,7 +19,7 @@ spec:
restartPolicy: Never
initContainers:
- name: cfssl
image: cfssl/cfssl:latest
image: "{{ .Values.cfssl.image.repository }}:{{ .Values.cfssl.image.tag }}"
command:
- bash
- -c
@@ -37,13 +37,21 @@ spec:
containers:
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command:
- sh
- -c
- |-
kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
command: ["/bin/sh", "-c"]
args:
- |
if kubectl get secret {{ include "etcd.caSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
echo "Secret {{ include "etcd.caSecretName" . }} already exists"
else
echo "Creating secret {{ include "etcd.caSecretName" . }}"
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem
fi
if kubectl get secret {{ include "etcd.clientSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
echo "Secret {{ include "etcd.clientSecretName" . }} already exists"
else
echo "Creating secret {{ include "etcd.clientSecretName" . }}"
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
fi
volumeMounts:
- mountPath: /certs
name: certs
@@ -15,6 +15,7 @@ rules:
resources:
- secrets
verbs:
- get
- delete
resourceNames:
- {{ include "etcd.caSecretName" . }}
@@ -22,6 +22,10 @@ spec:
- name: certs
secret:
secretName: {{ include "etcd.caSecretName" . }}
{{- with .Values.etcd.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: etcd
image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag | default "v3.5.4" }}
@@ -54,13 +54,16 @@ etcd:
|
||||
name: ""
|
||||
persistence:
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
storageClassName: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
# -- The custom annotations to add to the PVC
|
||||
customAnnotations: {}
|
||||
# volumeType: local
|
||||
|
||||
# -- (array) Kubernetes affinity rules to apply to Kamaji etcd pods
|
||||
tolerations: []
|
||||
|
||||
overrides:
|
||||
caSecret:
|
||||
# -- Name of the secret which contains CA's certificate and private key. (default: "etcd-certs")
|
||||
@@ -157,7 +160,9 @@ loggingDevel:
|
||||
enable: false
|
||||
|
||||
datastore:
|
||||
# -- (string) The Datastore name override, if empty defaults to `default`
|
||||
# -- (bool) Enable the Kamaji Datastore creation (default=true)
|
||||
enabled: true
|
||||
# -- (string) The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to.
|
||||
nameOverride:
|
||||
# -- (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd).
|
||||
driver: etcd
|
||||
@@ -179,6 +184,7 @@ datastore:
|
||||
# -- The Secret key where the data is stored.
|
||||
keyPath:
|
||||
tlsConfig:
|
||||
enabled: true
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
# -- Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
@@ -209,3 +215,8 @@ datastore:
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the private key.
|
||||
keyPath:
|
||||
|
||||
cfssl:
|
||||
image:
|
||||
repository: cfssl/cfssl
|
||||
tag: latest
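Taken together, the values changes above add an opt-out for the bundled Datastore and an overridable cfssl image. A minimal override sketch showing only the new knobs (pointing at a pre-existing `default` Datastore is an illustrative assumption, not something this diff prescribes):

```yaml
# values-override.yaml (sketch of the new chart values only)
datastore:
  enabled: false        # skip creating the bundled Datastore resource
  nameOverride: default # with enabled=false, the existing Datastore to connect to
  driver: etcd
etcd:
  tolerations: []       # tolerations applied to the Kamaji etcd pods
cfssl:
  image:
    repository: cfssl/cfssl
    tag: latest
```

With `enabled: false`, `nameOverride` names the existing Datastore to connect to, matching the comment introduced in the hunk above.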
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
cmdutils "github.com/clastix/kamaji/cmd/utils"
|
||||
@@ -95,15 +97,19 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
MetricsBindAddress: metricsBindAddress,
|
||||
Port: 9443,
|
||||
Scheme: scheme,
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: metricsBindAddress,
|
||||
},
|
||||
WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
|
||||
Port: 9443,
|
||||
}),
|
||||
HealthProbeBindAddress: healthProbeBindAddress,
|
||||
LeaderElection: leaderElect,
|
||||
LeaderElectionNamespace: managerNamespace,
|
||||
LeaderElectionID: "799b98bc.clastix.io",
|
||||
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
|
||||
opts.Resync = &cacheResyncPeriod
|
||||
opts.SyncPeriod = &cacheResyncPeriod
|
||||
|
||||
return cache.New(config, opts)
|
||||
},
|
||||
@@ -116,7 +122,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
|
||||
tcpChannel, certChannel := make(controllers.TenantControlPlaneChannel), make(controllers.CertificateChannel)
|
||||
|
||||
if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
if err = (&controllers.DataStore{Client: mgr.GetClient(), TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "DataStore")
|
||||
|
||||
return err
|
||||
@@ -169,7 +175,9 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
handlers.Freeze{},
|
||||
},
|
||||
routes.TenantControlPlaneDefaults{}: {
|
||||
handlers.TenantControlPlaneDefaults{DefaultDatastore: datastore},
|
||||
handlers.TenantControlPlaneDefaults{
|
||||
DefaultDatastore: datastore,
|
||||
},
|
||||
},
|
||||
routes.TenantControlPlaneValidate{}: {
|
||||
handlers.TenantControlPlaneName{},
|
||||
|
||||
@@ -4,9 +4,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
_ "go.uber.org/automaxprocs" // Automatically set `GOMAXPROCS` to match Linux container CPU quota.
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -22,9 +19,6 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
Use: "kamaji",
|
||||
Short: "Build and operate Kubernetes at scale with a fraction of operational burden.",
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
// Seed is required to ensure non reproducibility for the certificates generate by Kamaji.
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(appsv1.RegisterDefaults(scheme))
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.11.4
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: datastores.kamaji.clastix.io
|
||||
spec:
|
||||
group: kamaji.clastix.io
|
||||
@@ -29,14 +29,19 @@ spec:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -44,21 +49,24 @@ spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: In case of authentication enabled for the given data
|
||||
store, specifies the username and password pair. This value is optional.
|
||||
description: |-
|
||||
In case of authentication enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It
|
||||
has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference
|
||||
where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -77,15 +85,17 @@ spec:
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It
|
||||
has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference
|
||||
where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -111,37 +121,40 @@ spec:
|
||||
- etcd
|
||||
- MySQL
|
||||
- PostgreSQL
|
||||
- NATS
|
||||
type: string
|
||||
endpoints:
|
||||
description: List of the endpoints to connect to the shared datastore.
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
minItems: 1
|
||||
type: array
|
||||
tlsConfig:
|
||||
description: Defines the TLS/SSL configuration required to connect
|
||||
to the data store in a secure way.
|
||||
description: |-
|
||||
Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
This value is optional.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: Retrieve the Certificate Authority certificate and
|
||||
private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based
|
||||
on certificates, and Kamaji is responsible in creating this.
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -160,16 +173,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -195,16 +209,17 @@ spec:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -223,16 +238,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret
|
||||
reference where the content is stored. This value
|
||||
is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -254,12 +270,10 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- certificateAuthority
|
||||
- clientCertificate
|
||||
type: object
|
||||
required:
|
||||
- driver
|
||||
- endpoints
|
||||
- tlsConfig
|
||||
type: object
|
||||
status:
|
||||
description: DataStoreStatus defines the observed state of DataStore.
|
||||
|
||||
config/install.yaml (4187 lines changed; file diff suppressed because it is too large)
@@ -13,4 +13,4 @@ kind: Kustomization
|
||||
images:
|
||||
- name: controller
|
||||
newName: clastix/kamaji
|
||||
newTag: v0.3.4
|
||||
newTag: v0.6.0
|
||||
|
||||
config/samples/kamaji_v1alpha1_datastore_nats_bronze.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
name: nats-bronze
|
||||
spec:
|
||||
driver: NATS
|
||||
endpoints:
|
||||
- bronze-nats.nats-system.svc:4222
|
||||
basicAuth:
|
||||
username:
|
||||
content: YWRtaW4=
|
||||
password:
|
||||
secretReference:
|
||||
name: nats-bronze-config
|
||||
namespace: nats-system
|
||||
keyPath: password
|
||||
tlsConfig:
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-bronze-config
|
||||
namespace: nats-system
|
||||
keyPath: ca.crt
|
||||
clientCertificate:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-bronze-config
|
||||
namespace: nats-system
|
||||
keyPath: server.crt
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: nats-bronze-config
|
||||
namespace: nats-system
|
||||
keyPath: server.key
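For the sample above to be usable, the referenced Secret must exist with matching keys. A minimal sketch of `nats-bronze-config` (all values are placeholders; the `deploy/kine/nats` Makefile added later in this changeset creates an equivalent Secret from the cfssl output):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: nats-bronze-config
  namespace: nats-system
type: Opaque
stringData:
  # Basic-auth password referenced by basicAuth.password.secretReference (placeholder).
  password: "password"
  # CA certificate, client certificate, and client key referenced by tlsConfig (placeholder PEM).
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    ...
    -----END CERTIFICATE-----
  server.crt: |
    -----BEGIN CERTIFICATE-----
    ...
    -----END CERTIFICATE-----
  server.key: |
    -----BEGIN RSA PRIVATE KEY-----
    ...
    -----END RSA PRIVATE KEY-----
```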
|
||||
config/samples/kamaji_v1alpha1_datastore_nats_gold.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
name: nats-gold
|
||||
spec:
|
||||
driver: NATS
|
||||
endpoints:
|
||||
- nats-gold.nats-system.svc:4222
|
||||
basicAuth:
|
||||
username:
|
||||
content: YWRtaW4=
|
||||
password:
|
||||
secretReference:
|
||||
name: nats-gold-config
|
||||
namespace: nats-system
|
||||
keyPath: password
|
||||
tlsConfig:
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-gold-config
|
||||
namespace: nats-system
|
||||
keyPath: ca.crt
|
||||
clientCertificate:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-gold-config
|
||||
namespace: nats-system
|
||||
keyPath: server.crt
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: nats-gold-config
|
||||
namespace: nats-system
|
||||
keyPath: server.key
|
||||
config/samples/kamaji_v1alpha1_datastore_nats_notls.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
name: nats-notls
|
||||
spec:
|
||||
driver: NATS
|
||||
endpoints:
|
||||
- notls-nats.nats-system.svc:4222
|
||||
basicAuth:
|
||||
username:
|
||||
content: YWRtaW4=
|
||||
password:
|
||||
secretReference:
|
||||
name: nats-notls-config
|
||||
namespace: nats-system
|
||||
keyPath: password
|
||||
|
||||
config/samples/kamaji_v1alpha1_datastore_nats_silver.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
name: nats-silver
|
||||
spec:
|
||||
driver: NATS
|
||||
endpoints:
|
||||
- nats-silver.nats-system.svc:4222
|
||||
basicAuth:
|
||||
username:
|
||||
content: YWRtaW4=
|
||||
password:
|
||||
secretReference:
|
||||
name: nats-silver-config
|
||||
namespace: nats-system
|
||||
keyPath: password
|
||||
tlsConfig:
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-silver-config
|
||||
namespace: nats-system
|
||||
keyPath: ca.crt
|
||||
clientCertificate:
|
||||
certificate:
|
||||
secretReference:
|
||||
name: nats-silver-config
|
||||
namespace: nats-system
|
||||
keyPath: server.crt
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: nats-silver-config
|
||||
namespace: nats-system
|
||||
keyPath: server.key
|
||||
@@ -30,25 +30,6 @@ kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
@@ -70,6 +51,25 @@ webhooks:
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
|
||||
@@ -40,12 +40,16 @@ func (s *CertificateLifecycle) Reconcile(ctx context.Context, request reconcile.
|
||||
logger.Info("starting CertificateLifecycle handling")
|
||||
|
||||
secret := corev1.Secret{}
|
||||
if err := s.client.Get(ctx, request.NamespacedName, &secret); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
logger.Info("resource may have been deleted, skipping")
|
||||
err := s.client.Get(ctx, request.NamespacedName, &secret)
|
||||
if k8serrors.IsNotFound(err) {
|
||||
logger.Info("resource have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
checkType, ok := secret.GetLabels()[constants.ControllerLabelResource]
|
||||
@@ -56,7 +60,6 @@ func (s *CertificateLifecycle) Reconcile(ctx context.Context, request reconcile.
|
||||
}
|
||||
|
||||
var crt *x509.Certificate
|
||||
var err error
|
||||
|
||||
switch checkType {
|
||||
case "x509":
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
type DataStore struct {
|
||||
client client.Client
|
||||
Client client.Client
|
||||
// TenantControlPlaneTrigger is the channel used to communicate across the controllers:
|
||||
// if a Data Source is updated we have to be sure that the reconciliation of the certificates content
|
||||
// for each Tenant Control Plane is put in place properly.
|
||||
@@ -39,19 +39,21 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
ds := &kamajiv1alpha1.DataStore{}
|
||||
if err := r.client.Get(ctx, request.NamespacedName, ds); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
err := r.Client.Get(ctx, request.NamespacedName, ds)
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("resource have been deleted, skipping")
|
||||
|
||||
log.Error(err, "unable to retrieve the request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
tcpList := kamajiv1alpha1.TenantControlPlaneList{}
|
||||
|
||||
if err := r.client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
if err := r.Client.List(ctx, &tcpList, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(kamajiv1alpha1.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
|
||||
}); err != nil {
|
||||
log.Error(err, "cannot retrieve list of the Tenant Control Plane using the following instance")
|
||||
@@ -66,7 +68,7 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
|
||||
ds.Status.UsedBy = tcpSets.List()
|
||||
|
||||
if err := r.client.Status().Update(ctx, ds); err != nil {
|
||||
if err := r.Client.Status().Update(ctx, ds); err != nil {
|
||||
log.Error(err, "cannot update the status for the given instance")
|
||||
|
||||
return reconcile.Result{}, err
|
||||
@@ -81,12 +83,6 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *DataStore) InjectClient(client client.Client) error {
|
||||
r.client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
enqueueFn := func(tcp *kamajiv1alpha1.TenantControlPlane, limitingInterface workqueue.RateLimitingInterface) {
|
||||
if dataStoreName := tcp.Status.Storage.DataStoreName; len(dataStoreName) > 0 {
|
||||
@@ -102,15 +98,15 @@ func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
|
||||
For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(
|
||||
predicate.ResourceVersionChangedPredicate{},
|
||||
)).
|
||||
Watches(&source.Kind{Type: &kamajiv1alpha1.TenantControlPlane{}}, handler.Funcs{
|
||||
CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &kamajiv1alpha1.TenantControlPlane{}), handler.Funcs{
|
||||
CreateFunc: func(_ context.Context, createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(createEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
UpdateFunc: func(_ context.Context, updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(updateEvent.ObjectOld.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
enqueueFn(updateEvent.ObjectNew.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
DeleteFunc: func(_ context.Context, deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
enqueueFn(deleteEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
|
||||
},
|
||||
}).
|
||||
|
||||
@@ -5,6 +5,7 @@ package finalizers
|
||||
|
||||
const (
|
||||
// DatastoreFinalizer is using a wrong name, since it's related to the underlying datastore.
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
DatastoreFinalizer = "finalizer.kamaji.clastix.io"
|
||||
DatastoreSecretFinalizer = "finalizer.kamaji.clastix.io/datastore-secret"
|
||||
SootFinalizer = "finalizer.kamaji.clastix.io/soot"
|
||||
)
|
||||
|
||||
@@ -40,6 +40,7 @@ type GroupDeletableResourceBuilderConfiguration struct {
|
||||
tcpReconcilerConfig TenantControlPlaneReconcilerConfig
|
||||
tenantControlPlane kamajiv1alpha1.TenantControlPlane
|
||||
connection datastore.Connection
|
||||
dataStore kamajiv1alpha1.DataStore
|
||||
}
|
||||
|
||||
// GetResources returns a list of resources that will be used to provide tenant control planes
|
||||
@@ -60,6 +61,11 @@ func GetDeletableResources(tcp *kamajiv1alpha1.TenantControlPlane, config GroupD
|
||||
Client: config.client,
|
||||
Connection: config.connection,
|
||||
})
|
||||
res = append(res, &ds.Config{
|
||||
Client: config.client,
|
||||
ConnString: config.connection.GetConnectionString(),
|
||||
DataStore: config.dataStore,
|
||||
})
|
||||
}
|
||||
|
||||
return res
|
||||
@@ -176,6 +182,12 @@ func getKubeconfigResources(c client.Client, tcpReconcilerConfig TenantControlPl
|
||||
KubeConfigFileName: resources.AdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "admin-kubeconfig",
|
||||
Client: c,
|
||||
KubeConfigFileName: resources.SuperAdminKubeConfigFileName,
|
||||
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
|
||||
},
|
||||
&resources.KubeconfigResource{
|
||||
Name: "controller-manager-kubeconfig",
|
||||
Client: c,
|
||||
@@ -193,6 +205,9 @@ func getKubeconfigResources(c client.Client, tcpReconcilerConfig TenantControlPl
|
||||
|
||||
func getKubernetesStorageResources(c client.Client, dbConnection datastore.Connection, datastore kamajiv1alpha1.DataStore) []resources.Resource {
|
||||
return []resources.Resource{
|
||||
&ds.MultiTenancy{
|
||||
DataStore: datastore,
|
||||
},
|
||||
&ds.Config{
|
||||
Client: c,
|
||||
ConnString: dbConnection.GetConnectionString(),
|
||||
|
||||
@@ -35,7 +35,7 @@ type CoreDNS struct {
|
||||
TriggerChannel chan event.GenericEvent
|
||||
}
|
||||
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
|
||||
tcp, err := c.GetTenantControlPlaneFunc()
|
||||
if err != nil {
|
||||
c.logger.Error(err, "cannot retrieve TenantControlPlane")
|
||||
@@ -79,7 +79,7 @@ func (c *CoreDNS) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.CoreDNSClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: c.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&rbacv1.ClusterRole{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&corev1.Service{}).
|
||||
|
||||
@@ -80,7 +80,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&appsv1.DaemonSet{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace
|
||||
}))).
|
||||
Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &corev1.ServiceAccount{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -94,7 +94,7 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Kind{Type: &v1.ClusterRoleBinding{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &v1.ClusterRoleBinding{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
if object.GetName() == konnectivity.CertCommonName {
|
||||
return []reconcile.Request{
|
||||
{
|
||||
@@ -107,6 +107,6 @@ func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return nil
|
||||
})).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -67,6 +67,6 @@ func (k *KubeadmPhase) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
For(k.Phase.GetWatchedObject(), builder.WithPredicates(predicate.NewPredicateFuncs(k.Phase.GetPredicateFunc()))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(k)
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ func (k *KubeProxy) SetupWithManager(mgr manager.Manager) error {
|
||||
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return object.GetName() == kubeadm.KubeProxyClusterRoleBindingName
|
||||
}))).
|
||||
Watches(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: k.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Owns(&corev1.ServiceAccount{}).
|
||||
Owns(&rbacv1.Role{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -84,7 +84,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "leases.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -128,7 +128,7 @@ func (m *Migrate) createOrUpdate(ctx context.Context) error {
|
||||
{
|
||||
Name: "catchall.migrate.kamaji.clastix.io",
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: pointer.String(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
URL: pointer.To(fmt.Sprintf("https://%s.%s.svc:443/migrate", m.WebhookServiceName, m.WebhookNamespace)),
|
||||
CABundle: m.WebhookCABundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
@@ -187,7 +187,7 @@ func (m *Migrate) SetupWithManager(mgr manager.Manager) error {
|
||||
|
||||
return object.GetName() == vwc.GetName()
|
||||
}))).
|
||||
Watches(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.TriggerChannel}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(m)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,13 +12,13 @@ import (
|
||||
"k8s.io/client-go/util/retry"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
@@ -175,10 +175,12 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
}()
|
||||
|
||||
mgr, err := controllerruntime.NewManager(tcpRest, controllerruntime.Options{
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
MetricsBindAddress: "0",
|
||||
NewClient: func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
|
||||
Logger: log.Log.WithName(fmt.Sprintf("soot_%s_%s", tcp.GetNamespace(), tcp.GetName())),
|
||||
Scheme: m.client.Scheme(),
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: "0",
|
||||
},
|
||||
NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
|
||||
return client.New(config, client.Options{
|
||||
Scheme: m.client.Scheme(),
|
||||
})
|
||||
@@ -256,6 +258,17 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
|
||||
if err = bootstrapToken.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
kubeadmRbac := &controllers.KubeadmPhase{
|
||||
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
|
||||
Phase: &resources.KubeadmPhase{
|
||||
Client: m.AdminClient,
|
||||
Phase: resources.PhaseClusterAdminRBAC,
|
||||
},
|
||||
}
|
||||
if err = kubeadmRbac.SetupWithManager(mgr); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Starting the manager
|
||||
go func() {
|
||||
if err = mgr.Start(tcpCtx); err != nil {
|
||||
@@ -289,7 +302,7 @@ func (m *Manager) SetupWithManager(mgr manager.Manager) error {
|
||||
m.sootMap = make(map[string]sootItem)
|
||||
|
||||
return controllerruntime.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
WatchesRawSource(&source.Channel{Source: m.sootManagerErrChan}, &handler.EnqueueRequestForObject{}).
|
||||
For(&kamajiv1alpha1.TenantControlPlane{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
obj := object.(*kamajiv1alpha1.TenantControlPlane) //nolint:forcetypeassert
|
||||
// status is required to understand if we have to start or stop the soot manager
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apimachineryerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/utils/clock"
|
||||
@@ -84,16 +84,15 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
defer cancelFn()
|
||||
|
||||
tenantControlPlane, err := r.getTenantControlPlane(ctx, req.NamespacedName)()
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("resource have been deleted, skipping")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
if apimachineryerrors.IsNotFound(err) {
|
||||
log.Info("resource may have been deleted, skipping")
|
||||
log.Error(err, "cannot retrieve the required resource")
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "cannot retrieve the required instance")
|
||||
|
||||
return ctrl.Result{}, err
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
releaser, err := mutex.Acquire(r.mutexSpec(tenantControlPlane))
|
||||
@@ -145,6 +144,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
|
||||
tcpReconcilerConfig: r.Config,
|
||||
tenantControlPlane: *tenantControlPlane,
|
||||
connection: dsConnection,
|
||||
dataStore: *ds,
|
||||
}
|
||||
|
||||
for _, resource := range GetDeletableResources(tenantControlPlane, groupDeletableResourceBuilderConfiguration) {
|
||||
@@ -227,7 +227,7 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
r.clock = clock.RealClock{}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Watches(&source.Channel{Source: r.CertificateChan}, handler.Funcs{GenericFunc: func(genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(&source.Channel{Source: r.CertificateChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
@@ -235,7 +235,7 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
},
|
||||
})
|
||||
}}).
|
||||
Watches(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
WatchesRawSource(&source.Channel{Source: r.TriggerChan}, handler.Funcs{GenericFunc: func(_ context.Context, genericEvent event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
limitingInterface.AddRateLimited(ctrl.Request{
|
||||
NamespacedName: k8stypes.NamespacedName{
|
||||
Namespace: genericEvent.Object.GetNamespace(),
|
||||
@@ -249,7 +249,7 @@ func (r *TenantControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error
|
||||
Owns(&appsv1.Deployment{}).
|
||||
Owns(&corev1.Service{}).
|
||||
Owns(&networkingv1.Ingress{}).
|
||||
Watches(&source.Kind{Type: &batchv1.Job{}}, handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request {
|
||||
WatchesRawSource(source.Kind(mgr.GetCache(), &batchv1.Job{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request {
|
||||
labels := object.GetLabels()
|
||||
|
||||
name, namespace := labels["tcp.kamaji.clastix.io/name"], labels["tcp.kamaji.clastix.io/namespace"]
|
||||
|
||||
@@ -26,7 +26,7 @@ nodes:
|
||||
## expose port 31132 of the node to port 31132 on the host for konnectivity
|
||||
- containerPort: 31132
|
||||
hostPort: 31132
|
||||
protocol: TCP
|
||||
protocol: TCP
|
||||
## expose port 31443 of the node to port 31443 on the host
|
||||
- containerPort: 31443
|
||||
hostPort: 31443
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"CN": "bronze.mysql-system.svc.cluster.local",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"localhost",
|
||||
"bronze",
|
||||
"bronze.mysql-system.svc",
|
||||
"bronze.mysql-system.svc.cluster.local"
|
||||
]
|
||||
}
|
||||
deploy/kine/nats/Makefile (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
|
||||
NAME:=default
|
||||
NAMESPACE:=nats-system
|
||||
|
||||
.PHONY: helm
|
||||
HELM = $(shell pwd)/../../../bin/helm
|
||||
helm: ## Download helm locally if necessary.
|
||||
$(call go-install-tool,$(HELM),helm.sh/helm/v3/cmd/helm@v3.9.0)
|
||||
|
||||
nats: nats-certificates nats-secret nats-deployment
|
||||
|
||||
nats-certificates:
|
||||
rm -rf $(ROOT_DIR)/certs/$(NAME) && mkdir -p $(ROOT_DIR)/certs/$(NAME)
|
||||
cfssl gencert -initca $(ROOT_DIR)/ca-csr.json | cfssljson -bare $(ROOT_DIR)/certs/$(NAME)/ca
|
||||
@mv $(ROOT_DIR)/certs/$(NAME)/ca.pem $(ROOT_DIR)/certs/$(NAME)/ca.crt
|
||||
@mv $(ROOT_DIR)/certs/$(NAME)/ca-key.pem $(ROOT_DIR)/certs/$(NAME)/ca.key
|
||||
@NAME=$(NAME) NAMESPACE=$(NAMESPACE) envsubst < server-csr.json > $(ROOT_DIR)/certs/$(NAME)/server-csr.json
|
||||
cfssl gencert -ca=$(ROOT_DIR)/certs/$(NAME)/ca.crt -ca-key=$(ROOT_DIR)/certs/$(NAME)/ca.key \
|
||||
-config=$(ROOT_DIR)/config.json -profile=server \
|
||||
$(ROOT_DIR)/certs/$(NAME)/server-csr.json | cfssljson -bare $(ROOT_DIR)/certs/$(NAME)/server
|
||||
@mv $(ROOT_DIR)/certs/$(NAME)/server.pem $(ROOT_DIR)/certs/$(NAME)/server.crt
|
||||
@mv $(ROOT_DIR)/certs/$(NAME)/server-key.pem $(ROOT_DIR)/certs/$(NAME)/server.key
|
||||
chmod 644 $(ROOT_DIR)/certs/$(NAME)/*
|
||||
|
||||
nats-secret:
|
||||
@kubectl create namespace $(NAMESPACE) || true
|
||||
@kubectl -n $(NAMESPACE) create secret generic nats-$(NAME)-config \
|
||||
--from-file=$(ROOT_DIR)/certs/$(NAME)/ca.crt --from-file=$(ROOT_DIR)/certs/$(NAME)/ca.key \
|
||||
--from-file=$(ROOT_DIR)/certs/$(NAME)/server.key --from-file=$(ROOT_DIR)/certs/$(NAME)/server.crt \
|
||||
--from-literal=password=password \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
|
||||
|
||||
nats-deployment:
|
||||
@VALUES_FILE=$(if $(findstring notls,$(NAME)),values-notls.yaml,values.yaml); \
|
||||
NAME=$(NAME) envsubst < $(ROOT_DIR)/$$VALUES_FILE | $(HELM) upgrade --install $(NAME) nats/nats --create-namespace -n nats-system -f -
|
||||
|
||||
|
||||
nats-destroy:
|
||||
@NAME=$(NAME) envsubst < $(ROOT_DIR)/nats.yaml | kubectl -n $(NAMESPACE) delete --ignore-not-found -f -
|
||||
	@kubectl delete -n $(NAMESPACE) secret nats-$(NAME)-config --ignore-not-found
|
||||
deploy/kine/nats/ca-csr.json (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"CN": "Clastix CA",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "IT",
|
||||
"ST": "Italy",
|
||||
"L": "Milan"
|
||||
}
|
||||
],
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
deploy/kine/nats/config.json (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"signing": {
|
||||
"default": {
|
||||
"expiry": "8760h"
|
||||
},
|
||||
"profiles": {
|
||||
"server": {
|
||||
"expiry": "8760h",
|
||||
"usages": [
|
||||
"signing",
|
||||
"key encipherment",
|
||||
"server auth",
|
||||
"client auth"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
deploy/kine/nats/server-csr.json (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"CN": "$NAME.$NAMESPACE.svc.cluster.local",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"localhost",
|
||||
"$NAME-nats",
|
||||
"$NAME-nats.$NAMESPACE.svc",
|
||||
"$NAME-nats.$NAMESPACE.svc.cluster.local"
|
||||
]
|
||||
}
|
||||
deploy/kine/nats/values-notls.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
config:
|
||||
merge:
|
||||
accounts:
|
||||
private:
|
||||
jetstream: enabled
|
||||
users:
|
||||
- {user: admin, password: "password", permissions: {subscribe: [">"], publish: [">"]}}
|
||||
cluster:
|
||||
enabled: no
|
||||
jetstream:
|
||||
enabled: true
|
||||
fileStore:
|
||||
pvc:
|
||||
size: 32Mi
|
||||
deploy/kine/nats/values.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
config:
|
||||
merge:
|
||||
accounts:
|
||||
private:
|
||||
jetstream: enabled
|
||||
users:
|
||||
- {user: admin, password: "password", permissions: {subscribe: [">"], publish: [">"]}}
|
||||
cluster:
|
||||
enabled: no
|
||||
nats:
|
||||
tls:
|
||||
enabled: true
|
||||
secretName: nats-$NAME-config
|
||||
cert: server.crt
|
||||
key: server.key
|
||||
jetstream:
|
||||
enabled: true
|
||||
fileStore:
|
||||
pvc:
|
||||
size: 32Mi
|
||||
@@ -86,6 +86,21 @@ helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
!!! note "A managed datastore is highly recommended in production"
|
||||
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to setup a multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
Now you should end up with a working Kamaji instance, including the default `datastore`:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
etcd-0 1/1 Running 0 50s
|
||||
etcd-1 1/1 Running 0 60s
|
||||
etcd-2 1/1 Running 0 90s
|
||||
kamaji-7949578bfb-lj44p 1/1 Running 0 12s
|
||||
```
|
||||
|
||||
> An unsuccessful first installation could fail for several reasons, such as missing a `StorageClass`, or even for a trivial `Ctrl+C` during the installation phase.
|
||||
>
|
||||
> See the [Cleanup](#cleanup) section before retrying an aborted installation.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
### Tenant Control Plane
|
||||
@@ -319,7 +334,8 @@ tenant-00-worker-02 Ready <none> 2m32s v1.25.0
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
Remove the worker nodes joined the tenant control plane
|
||||
### Delete a Tenant Cluster
|
||||
First, remove the worker nodes that joined the tenant control plane
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
|
||||
@@ -337,10 +353,37 @@ for i in "${!HOSTS[@]}"; do
|
||||
done
|
||||
```
|
||||
|
||||
Delete the tenant control plane from kamaji
|
||||
Delete the tenant control plane from Kamaji
|
||||
|
||||
```bash
|
||||
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
|
||||
```
|
||||
|
||||
### Uninstall Kamaji
|
||||
Uninstall the Kamaji controller by removing the Helm release
|
||||
|
||||
```bash
|
||||
helm uninstall kamaji -n kamaji-system
|
||||
```
|
||||
|
||||
The default datastore installed three `etcd` replicas with persistent volumes, so remove the `PersistentVolumeClaims` resources:
|
||||
|
||||
```bash
|
||||
kubectl -n kamaji-system delete pvc --all
|
||||
```
|
||||
|
||||
Also delete the custom resources:
|
||||
|
||||
```bash
|
||||
kubectl delete crd tenantcontrolplanes.kamaji.clastix.io
|
||||
kubectl delete crd datastores.kamaji.clastix.io
|
||||
```
|
||||
|
||||
In case of a broken installation, manually remove the hooks installed by Kamaji:
|
||||
|
||||
```bash
|
||||
kubectl delete ValidatingWebhookConfiguration kamaji-validating-webhook-configuration
|
||||
kubectl delete MutatingWebhookConfiguration kamaji-mutating-webhook-configuration
|
||||
```
|
||||
|
||||
That's all folks!
|
||||
|
||||
@@ -1,39 +1,28 @@
|
||||
# Use Alternative Datastores
|
||||
|
||||
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [PostgreSQL](https://www.postgresql.org/).
|
||||
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
## Install the datastore
|
||||
## Installing Drivers
|
||||
|
||||
On the Management Cluster, install one of the alternative supported datastore:
|
||||
The following `make` recipes help you to setup alternative `Datastore` resources.
|
||||
|
||||
- **MySQL** install it with command:
|
||||
> The default settings are not production grade:
|
||||
> the following scripts are just used to test the Kamaji usage of different drivers.
|
||||
|
||||
`$ make -C deploy/kine/mysql mariadb`
|
||||
On the Management Cluster, you can use the following commands:
|
||||
|
||||
- **PostgreSQL** install it with command:
|
||||
- **MySQL**: `$ make -C deploy/kine/mysql mariadb`
|
||||
|
||||
`$ make -C deploy/kine/postgresql postgresql`
|
||||
- **PostgreSQL**: `$ make -C deploy/kine/postgresql postgresql`
|
||||
|
||||
## Install Cert Manager
|
||||
- **NATS**: `$ make -C deploy/kine/nats nats`
|
||||
|
||||
As prerequisite for Kamaji, install the Cert Manager
|
||||
## Defining a default Datastore upon Kamaji installation
|
||||
|
||||
```bash
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.11.0 \
|
||||
--set installCRDs=true
|
||||
```
|
||||
Use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL|NATS>`.
|
||||
Please refer to the Chart available values for more information on supported options.
|
||||
|
||||
## Install Kamaji
|
||||
|
||||
Use Helm to install the Kamaji Operator and make sure it uses a datastore with the proper driver `datastore.driver=<MySQL|PostgreSQL>`.
|
||||
|
||||
For example, with a PostreSQL datastore installed:
|
||||
For example, with a PostgreSQL datastore installed:
|
||||
|
||||
```bash
|
||||
helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
|
||||
@@ -60,4 +49,19 @@ helm install kamaji charts/kamaji -n kamaji-system --create-namespace \
|
||||
--set datastore.tlsConfig.clientCertificate.privateKey.keyPath=tls.key
|
||||
```
|
||||
|
||||
Once installed, you will able to create Tenant Control Planes using an alternative datastore.
|
||||
Once installed, you will be able to create Tenant Control Planes using an alternative datastore.
|
||||
|
||||
## Defining specific Datastore per Tenant Control Plane
|
||||
|
||||
Each `TenantControlPlane` can refer to a specific `Datastore` thanks to the `/spec/dataStore` field.
|
||||
This allows you to implement your preferred sharding or pooling strategy.
|
||||
|
||||
When the said key is omitted, Kamaji will use the default datastore configured with its CLI argument `--datastore`.
|
||||
|
||||
## NATS considerations
|
||||
|
||||
The NATS support is still experimental, mostly because multi-tenancy is **NOT** supported.
|
||||
|
||||
> A `NATS` based DataStore can host one and only one Tenant Control Plane.
|
||||
> When a `TenantControlPlane` is referring to a NATS `DataStore` already used by another instance,
|
||||
> reconciliation will fail and blocked.
|
||||
|
||||
@@ -99,7 +99,7 @@ The rotation will occur the day before their expiration.
|
||||
> Nota Bene:
|
||||
>
|
||||
> Kamaji is responsible for creating the `etcd` client certificate, and the generation of a new one will occur.
|
||||
> For other Datastore drivers, such as MySQL or PostgreSQL, the referenced Secret will always be deleted by the Controller to trigger the rotation:
|
||||
> For other Datastore drivers, such as MySQL, PostgreSQL, or NATS, the referenced Secret will always be deleted by the Controller to trigger the rotation:
|
||||
> the PKI management, since it's offloaded externally, must provide the renewed certificates.
|
||||
|
||||
## Certificate Authority rotation
|
||||
|
||||
@@ -69,7 +69,9 @@ From the main view, clicking on a Tenant Control Plane row will bring you to the
|
||||

|
||||
|
||||
### Working with Datastore
|
||||
From the menu bar on the left, clicking on the Datastores item, you can access the list of provisioned Datastores. It shows a summary about datastores, including name and the used driver, i.e. etcd, mysql, and postgresql.
|
||||
|
||||
From the menu bar on the left, clicking on the Datastores item, you can access the list of provisioned Datastores.
|
||||
It shows a summary about datastores, including name and the used driver, i.e. `etcd`, `MySQL`, `PostgreSQL`, and `NATS`.
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Datastore Migration
|
||||
|
||||
On the Management Cluster, you can deploy one or more multi-tenant datastores as `etcd`, `PostgreSQL`, and `MySQL` to save the state of the Tenant Clusters. A Tenant Control Plane can be migrated from a datastore to another one without service disruption or without complex and error prone backup & restore procedures.
|
||||
On the Management Cluster, you can deploy one or more multi-tenant datastores as `etcd`, `PostgreSQL`, `MySQL`, and `NATS` to save the state of the Tenant Clusters.
|
||||
A Tenant Control Plane can be migrated from a datastore to another one without service disruption or without complex and error-prone backup & restore procedures.
|
||||
|
||||
This guide will assist you to live migrate Tenant's data from a datastore to another one having the same `etcd` driver.
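Assuming the migration is driven the same way a datastore is initially selected, that is by updating `/spec/dataStore` on the Tenant Control Plane, the request amounts to a one-field change (resource names are illustrative):

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: tenant-00
spec:
  # was: dataStore: default
  dataStore: dedicated   # the target DataStore, using the same etcd driver
```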
|
||||
|
||||
@@ -80,7 +81,7 @@ helm install dedicated clastix/kamaji-etcd -n dedicated --create-namespace --set
|
||||
You should end up with a new datastore `dedicated` provided by an `etcd` cluster:
|
||||
|
||||
```yaml
|
||||
kubectl get datastore dedicated -o yaml
|
||||
# kubectl get datastore dedicated -o yaml
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
|
||||
@@ -125,7 +125,7 @@ helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
|
||||
```
|
||||
|
||||
!!! note "A managed datastore is highly recommended in production"
|
||||
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to setup a multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, as `MySQL` or `PostgreSQL` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
The [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides the code to setup a multi-tenant `etcd` running as StatefulSet made of three replicas. Optionally, Kamaji offers support for a more robust storage system, as `MySQL`, `PostgreSQL`, or `NATS` compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
|
||||
|
||||
## Create Tenant Cluster
|
||||
|
||||
|
||||
Binary image file updated (152 KiB before, 163 KiB after); a further large file diff is suppressed.
@@ -12,3 +12,10 @@ In Kamaji, there are different components that might require independent version
|
||||
| v0.3.2 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.3 | v1.22+ | [v1.21.0 .. v1.27.3] |
|
||||
| v0.3.4 | v1.22+ | [v1.21.0 .. v1.28.1] |
|
||||
| v0.3.5 | v1.22+ | [v1.21.0 .. v1.28.1] |
|
||||
| v0.3.5 | v1.22+ | [v1.21.0 .. v1.28.1] |
|
||||
| v0.4.0 | v1.22+ | [v1.21.0 .. v1.29.0] |
|
||||
| v0.4.1 | v1.22+ | [v1.21.0 .. v1.29.1] |
|
||||
| v0.4.2 | v1.22+ | [v1.21.0 .. v1.29.1] |
|
||||
| v0.5.0 | v1.22+ | [v1.21.0 .. v1.30.0] |
|
||||
| v0.6.0 | v1.22+ | [v1.21.0 .. v1.30.1] |
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
@@ -44,7 +44,7 @@ var _ = BeforeSuite(func() {
|
||||
|
||||
By("bootstrapping test environment")
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: pointer.Bool(true),
|
||||
UseExistingCluster: pointer.To(true),
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -27,7 +27,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional resources
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
AdditionalInitContainers: []corev1.Container{{
|
||||
Name: initContainerName,
|
||||
Image: initContainerImage,
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
@@ -43,7 +43,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional options",
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: pointer.Int32(1),
Replicas: pointer.To(int32(1)),
AdditionalInitContainers: []corev1.Container{{
Name: initContainerName,
Image: initContainerImage,
@@ -256,7 +256,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with additional options",
}, &deploy)).NotTo(HaveOccurred())

return deploy.Spec.Template.Spec.InitContainers
}, 10*time.Second, time.Second).Should(HaveLen(0), "Deployment should not contain anymore the init container")
}, 10*time.Second, time.Second).Should(BeEmpty(), "Deployment should not contain anymore the init container")

Eventually(func() bool {
deploy := appsv1.Deployment{}
74
e2e/tcp_custom_sa_test.go
Normal file
@@ -0,0 +1,74 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
"context"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pointer "k8s.io/utils/ptr"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

var _ = Describe("Deploy a TenantControlPlane with resource with custom service account", func() {
// service account object
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
}
// Fill TenantControlPlane object
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "tcp-clusterip-customsa",
Namespace: "default",
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: pointer.To(int32(1)),
ServiceAccountName: sa.GetName(),
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
},
},
NetworkProfile: kamajiv1alpha1.NetworkProfileSpec{
Address: "172.18.0.2",
},
Kubernetes: kamajiv1alpha1.KubernetesSpec{
Version: "v1.23.6",
Kubelet: kamajiv1alpha1.KubeletSpec{
CGroupFS: "cgroupfs",
},
AdmissionControllers: kamajiv1alpha1.AdmissionControllers{
"LimitRanger",
"ResourceQuota",
},
},
Addons: kamajiv1alpha1.AddonsSpec{},
},
}

// Create service account and TenantControlPlane resources into the cluster
JustBeforeEach(func() {
Expect(k8sClient.Create(context.Background(), sa)).NotTo(HaveOccurred())
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
})

// Delete the service account and TenantControlPlane resources after test is finished
JustAfterEach(func() {
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
})
// Check if TenantControlPlane resource has been created and if its pods have the right service account
It("Should be Ready and have correct sa", func() {
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
PodsServiceAccountMustEqualTo(tcp, sa)
})
})
@@ -15,7 +15,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
@@ -36,7 +36,7 @@ var _ = Describe("When migrating a Tenant Control Plane to another datastore", f
DataStore: "etcd-bronze",
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: pointer.Int32(1),
Replicas: pointer.To(int32(1)),
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "NodePort",

@@ -9,7 +9,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
@@ -25,7 +25,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with the MySQL driver", f
DataStore: "mysql-bronze",
ControlPlane: kamajiv1alpha1.ControlPlane{
Deployment: kamajiv1alpha1.DeploymentSpec{
Replicas: pointer.Int32(1),
Replicas: pointer.To(int32(1)),
},
Service: kamajiv1alpha1.ServiceSpec{
ServiceType: "ClusterIP",
54
e2e/tcp_nats_ready_no_tls_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
|
||||
var _ = Describe("Deploy a TenantControlPlane resource with the NATS driver without TLS", func() {
|
||||
// Fill TenantControlPlane object
|
||||
tcp := &kamajiv1alpha1.TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nats-notls",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
DataStore: "nats-notls",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
},
|
||||
},
|
||||
Kubernetes: kamajiv1alpha1.KubernetesSpec{
|
||||
Version: "v1.23.6",
|
||||
Kubelet: kamajiv1alpha1.KubeletSpec{
|
||||
CGroupFS: "cgroupfs",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// Create a TenantControlPlane resource into the cluster
|
||||
JustBeforeEach(func() {
|
||||
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
|
||||
})
|
||||
// Delete the TenantControlPlane resource after test is finished
|
||||
JustAfterEach(func() {
|
||||
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
|
||||
})
|
||||
// Check if TenantControlPlane resource has been created
|
||||
It("Should be Ready", func() {
|
||||
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
|
||||
})
|
||||
})
|
||||
54
e2e/tcp_nats_ready_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
|
||||
var _ = Describe("Deploy a TenantControlPlane resource with the NATS driver", func() {
|
||||
// Fill TenantControlPlane object
|
||||
tcp := &kamajiv1alpha1.TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nats",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
DataStore: "nats-bronze",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
},
|
||||
},
|
||||
Kubernetes: kamajiv1alpha1.KubernetesSpec{
|
||||
Version: "v1.23.6",
|
||||
Kubelet: kamajiv1alpha1.KubeletSpec{
|
||||
CGroupFS: "cgroupfs",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// Create a TenantControlPlane resource into the cluster
|
||||
JustBeforeEach(func() {
|
||||
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
|
||||
})
|
||||
// Delete the TenantControlPlane resource after test is finished
|
||||
JustAfterEach(func() {
|
||||
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
|
||||
})
|
||||
// Check if TenantControlPlane resource has been created
|
||||
It("Should be Ready", func() {
|
||||
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
|
||||
})
|
||||
})
|
||||
78
e2e/tcp_pod_additional_metadata_test.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
|
||||
var _ = Describe("Deploy a TenantControlPlane resource with additional pod metadata", func() {
|
||||
// Fill TenantControlPlane object
|
||||
tcp := &kamajiv1alpha1.TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "tcp-clusterip-additional-metadata",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.To(int32(1)),
|
||||
PodAdditionalMetadata: kamajiv1alpha1.AdditionalMetadata{
|
||||
Labels: map[string]string{
|
||||
"hello-label": "world",
|
||||
"foo-label": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"hello-ann": "world",
|
||||
"foo-ann": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
},
|
||||
},
|
||||
NetworkProfile: kamajiv1alpha1.NetworkProfileSpec{
|
||||
Address: "172.18.0.2",
|
||||
},
|
||||
Kubernetes: kamajiv1alpha1.KubernetesSpec{
|
||||
Version: "v1.23.6",
|
||||
Kubelet: kamajiv1alpha1.KubeletSpec{
|
||||
CGroupFS: "cgroupfs",
|
||||
},
|
||||
AdmissionControllers: kamajiv1alpha1.AdmissionControllers{
|
||||
"LimitRanger",
|
||||
"ResourceQuota",
|
||||
},
|
||||
},
|
||||
Addons: kamajiv1alpha1.AddonsSpec{},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a TenantControlPlane resource into the cluster
|
||||
JustBeforeEach(func() {
|
||||
Expect(k8sClient.Create(context.Background(), tcp)).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Delete the TenantControlPlane resource after test is finished
|
||||
JustAfterEach(func() {
|
||||
Expect(k8sClient.Delete(context.Background(), tcp)).Should(Succeed())
|
||||
})
|
||||
|
||||
// Check if TenantControlPlane resource has been created
|
||||
It("Should be Ready with expected additional metadata", func() {
|
||||
StatusMustEqualTo(tcp, kamajiv1alpha1.VersionReady)
|
||||
AllPodsLabelMustEqualTo(tcp, "hello-label", "world")
|
||||
AllPodsLabelMustEqualTo(tcp, "foo-label", "bar")
|
||||
AllPodsAnnotationMustEqualTo(tcp, "hello-ann", "world")
|
||||
AllPodsAnnotationMustEqualTo(tcp, "foo-ann", "bar")
|
||||
})
|
||||
})
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -25,7 +25,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with the PostgreSQL drive
|
||||
DataStore: "postgresql-bronze",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -24,7 +24,7 @@ var _ = Describe("Deploy a TenantControlPlane resource", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -27,7 +27,7 @@ var _ = Describe("Deploy a TenantControlPlane with wrong preferred kubelet addre
|
||||
DataStore: "default",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
@@ -63,7 +63,7 @@ var _ = Describe("Deploy a TenantControlPlane with wrong preferred kubelet addre
|
||||
DataStore: "default",
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -26,7 +26,7 @@ var _ = Describe("downgrade of a TenantControlPlane Kubernetes version", func()
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -26,7 +26,7 @@ var _ = Describe("non-linear minor upgrade of a TenantControlPlane Kubernetes ve
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/upgrade"
|
||||
@@ -36,7 +36,7 @@ var _ = Describe("using an unsupported TenantControlPlane Kubernetes version", f
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
@@ -64,7 +64,7 @@ var _ = Describe("using an unsupported TenantControlPlane Kubernetes version", f
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "ClusterIP",
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -136,3 +137,61 @@ func StatusMustEqualTo(tcp *kamajiv1alpha1.TenantControlPlane, status kamajiv1al
|
||||
return *tcp.Status.Kubernetes.Version.Status
|
||||
}, 5*time.Minute, time.Second).Should(Equal(status))
|
||||
}
|
||||
|
||||
func AllPodsLabelMustEqualTo(tcp *kamajiv1alpha1.TenantControlPlane, label string, value string) {
|
||||
Eventually(func() bool {
|
||||
tcpPods := &corev1.PodList{}
|
||||
err := k8sClient.List(context.Background(), tcpPods, client.MatchingLabels{
|
||||
"kamaji.clastix.io/name": tcp.GetName(),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, pod := range tcpPods.Items {
|
||||
if pod.Labels[label] != value {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}, 5*time.Minute, time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
func AllPodsAnnotationMustEqualTo(tcp *kamajiv1alpha1.TenantControlPlane, annotation string, value string) {
|
||||
Eventually(func() bool {
|
||||
tcpPods := &corev1.PodList{}
|
||||
err := k8sClient.List(context.Background(), tcpPods, client.MatchingLabels{
|
||||
"kamaji.clastix.io/name": tcp.GetName(),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, pod := range tcpPods.Items {
|
||||
if pod.Annotations[annotation] != value {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}, 5*time.Minute, time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
func PodsServiceAccountMustEqualTo(tcp *kamajiv1alpha1.TenantControlPlane, sa *corev1.ServiceAccount) {
|
||||
saName := sa.GetName()
|
||||
Eventually(func() bool {
|
||||
tcpPods := &corev1.PodList{}
|
||||
err := k8sClient.List(context.Background(), tcpPods, client.MatchingLabels{
|
||||
"kamaji.clastix.io/name": tcp.GetName(),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, pod := range tcpPods.Items {
|
||||
if pod.Spec.ServiceAccountName != saName {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}, 5*time.Minute, time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
@@ -19,9 +19,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -44,7 +44,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "NodePort",
|
||||
@@ -121,7 +121,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
var joinCommandBuffer *bytes.Buffer
|
||||
|
||||
By("generating kubeadm join command", func() {
|
||||
joinCommandBuffer = bytes.NewBuffer([]byte(""))
|
||||
joinCommandBuffer = bytes.NewBufferString("")
|
||||
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile.Name())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -129,7 +129,7 @@ var _ = Describe("starting a kind worker with kubeadm", func() {
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(cmd.RunCreateToken(joinCommandBuffer, clientset, "", util.DefaultInitConfiguration(), true, "", kubeconfigFile.Name())).ToNot(HaveOccurred())
|
||||
Expect(cmd.RunCreateToken(joinCommandBuffer, clientset, "", &kubeadmv1beta3.InitConfiguration{}, true, "", kubeconfigFile.Name())).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
By("executing the command in the worker node", func() {
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
)
|
||||
@@ -39,7 +39,7 @@ var _ = Describe("validating kubeconfig", func() {
|
||||
Spec: kamajiv1alpha1.TenantControlPlaneSpec{
|
||||
ControlPlane: kamajiv1alpha1.ControlPlane{
|
||||
Deployment: kamajiv1alpha1.DeploymentSpec{
|
||||
Replicas: pointer.Int32(1),
|
||||
Replicas: pointer.To(int32(1)),
|
||||
},
|
||||
Service: kamajiv1alpha1.ServiceSpec{
|
||||
ServiceType: "NodePort",
|
||||
|
||||
229
go.mod
@@ -1,119 +1,119 @@
|
||||
module github.com/clastix/kamaji
|
||||
|
||||
go 1.18
|
||||
go 1.22.0
|
||||
|
||||
toolchain go1.22.1
|
||||
|
||||
require (
|
||||
github.com/JamesStewy/go-mysqldump v0.2.2
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/go-logr/logr v1.2.3
|
||||
github.com/go-logr/logr v1.4.1
|
||||
github.com/go-pg/pg/v10 v10.10.6
|
||||
github.com/go-sql-driver/mysql v1.6.0
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/juju/mutex/v2 v2.0.0
|
||||
github.com/onsi/ginkgo/v2 v2.6.0
|
||||
github.com/onsi/gomega v1.24.1
|
||||
github.com/nats-io/nats.go v1.34.1
|
||||
github.com/onsi/ginkgo/v2 v2.17.1
|
||||
github.com/onsi/gomega v1.32.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.10.1
|
||||
github.com/testcontainers/testcontainers-go v0.13.0
|
||||
go.etcd.io/etcd/api/v3 v3.5.6
|
||||
go.etcd.io/etcd/client/v3 v3.5.6
|
||||
go.etcd.io/etcd/api/v3 v3.5.10
|
||||
go.etcd.io/etcd/client/v3 v3.5.10
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||
k8s.io/api v0.26.1
|
||||
k8s.io/apimachinery v0.26.1
|
||||
k8s.io/apiserver v0.26.1
|
||||
k8s.io/client-go v0.26.1
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0
|
||||
k8s.io/api v0.30.1
|
||||
k8s.io/apimachinery v0.30.1
|
||||
k8s.io/apiserver v0.30.1
|
||||
k8s.io/client-go v0.30.1
|
||||
k8s.io/cluster-bootstrap v0.0.0
|
||||
k8s.io/klog/v2 v2.80.1
|
||||
k8s.io/klog/v2 v2.120.1
|
||||
k8s.io/kubelet v0.0.0
|
||||
k8s.io/kubernetes v1.26.1
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
|
||||
sigs.k8s.io/controller-runtime v0.14.0
|
||||
k8s.io/kubernetes v1.30.1
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.17.1-0.20240416095710-67b27f27e514
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/storage v1.18.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.4.17 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.23 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.25 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 // indirect
|
||||
github.com/containerd/cgroups v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/containerd/containerd v1.5.9 // indirect
|
||||
github.com/coredns/caddy v1.1.0 // indirect
|
||||
github.com/coredns/corefile-migration v1.0.17 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/coredns/caddy v1.1.1 // indirect
|
||||
github.com/coredns/corefile-migration v1.0.21 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.11+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-logr/zapr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-pg/zerochecker v0.2.0 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lithammer/dedent v1.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.5 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/moby/sys/mount v0.2.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/opencontainers/runc v1.1.12 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/afero v1.7.0 // indirect
|
||||
github.com/prometheus/client_golang v1.18.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.45.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
@@ -122,72 +122,77 @@ require (
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect
|
||||
github.com/vmihailenco/tagparser v0.1.2 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect
|
||||
golang.org/x/mod v0.15.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.12.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/api v0.63.0 // indirect
|
||||
golang.org/x/tools v0.18.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
|
||||
google.golang.org/grpc v1.49.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.66.2 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.26.1 // indirect
|
||||
k8s.io/cli-runtime v0.26.1 // indirect
|
||||
k8s.io/component-base v0.26.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.1 // indirect
|
||||
k8s.io/cli-runtime v0.30.1 // indirect
|
||||
k8s.io/component-base v0.30.1 // indirect
|
||||
k8s.io/component-helpers v0.0.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
k8s.io/kube-proxy v0.0.0 // indirect
|
||||
k8s.io/system-validators v1.8.0 // indirect
|
||||
mellium.im/sasl v0.3.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.12.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
k8s.io/api => k8s.io/api v0.26.1
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.26.1
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.26.1
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1
|
||||
k8s.io/client-go => k8s.io/client-go v0.26.1
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.26.1
|
||||
k8s.io/component-base => k8s.io/component-base v0.26.1
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.26.1
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.26.1
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.26.1
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.1
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.26.1
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.26.1
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1
|
||||
k8s.io/metrics => k8s.io/metrics v0.26.1
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.26.1
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1
|
||||
k8s.io/api => k8s.io/api v0.30.1
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.1
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.30.1
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.30.1
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.1
|
||||
k8s.io/client-go => k8s.io/client-go v0.30.1
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.1
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.1
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.30.1
|
||||
k8s.io/component-base => k8s.io/component-base v0.30.1
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.30.1
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.30.1
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.30.1
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.1
|
||||
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.1
|
||||
k8s.io/endpointslice => k8s.io/endpointslice v0.30.1
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.1
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.1
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.1
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.1
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.30.1
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.30.1
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.1
|
||||
k8s.io/metrics => k8s.io/metrics v0.30.1
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.30.1
|
||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.1
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.1
|
||||
)
|
||||
|
||||
replace github.com/JamesStewy/go-mysqldump => github.com/vtoma/go-mysqldump v1.0.0
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
@@ -61,7 +61,8 @@ func (d Deployment) Build(ctx context.Context, deployment *appsv1.Deployment, te
|
||||
|
||||
d.setLabels(deployment, utilities.MergeMaps(utilities.KamajiLabels(tenantControlPlane.GetName(), "deployment"), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
|
||||
d.setAnnotations(deployment, utilities.MergeMaps(deployment.Annotations, tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Annotations))
|
||||
d.setTemplateLabels(&deployment.Spec.Template, d.templateLabels(ctx, &tenantControlPlane))
|
||||
d.setTemplateLabels(&deployment.Spec.Template, utilities.MergeMaps(d.templateLabels(ctx, &tenantControlPlane), tenantControlPlane.Spec.ControlPlane.Deployment.PodAdditionalMetadata.Labels))
|
||||
d.setTemplateAnnotations(&deployment.Spec.Template, tenantControlPlane.Spec.ControlPlane.Deployment.PodAdditionalMetadata.Annotations)
|
||||
d.setNodeSelector(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
d.setToleration(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
d.setAffinity(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
@@ -76,6 +77,7 @@ func (d Deployment) Build(ctx context.Context, deployment *appsv1.Deployment, te
|
||||
d.setContainers(&deployment.Spec.Template.Spec, tenantControlPlane, address)
|
||||
d.setAdditionalVolumes(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
d.setVolumes(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
d.setServiceAccount(&deployment.Spec.Template.Spec, tenantControlPlane)
|
||||
d.Client.Scheme().Default(deployment)
|
||||
}
|
||||
|
||||
@@ -224,7 +226,7 @@ func (d Deployment) buildPKIVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.T
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Projected: &corev1.ProjectedVolumeSource{
|
||||
Sources: sources,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -240,7 +242,7 @@ func (d Deployment) buildCAVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.Te
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -256,7 +258,7 @@ func (d Deployment) buildSSLCertsVolume(podSpec *corev1.PodSpec, tcp kamajiv1alp
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -272,7 +274,7 @@ func (d Deployment) buildShareCAVolume(podSpec *corev1.PodSpec, tcp kamajiv1alph
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -288,7 +290,7 @@ func (d Deployment) buildLocalShareCAVolume(podSpec *corev1.PodSpec, tcp kamajiv
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Certificates.CA.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -304,7 +306,7 @@ func (d Deployment) buildSchedulerVolume(podSpec *corev1.PodSpec, tcp kamajiv1al
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.KubeConfig.Scheduler.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -320,7 +322,7 @@ func (d Deployment) buildControllerManagerVolume(podSpec *corev1.PodSpec, tcp ka
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.KubeConfig.ControllerManager.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -708,7 +710,7 @@ func (d Deployment) buildKubeAPIServerCommand(tenantControlPlane kamajiv1alpha1.
|
||||
}
|
||||
|
||||
switch d.DataStore.Spec.Driver {
|
||||
case kamajiv1alpha1.KineMySQLDriver, kamajiv1alpha1.KinePostgreSQLDriver:
|
||||
case kamajiv1alpha1.KineMySQLDriver, kamajiv1alpha1.KinePostgreSQLDriver, kamajiv1alpha1.KineNatsDriver:
|
||||
desiredArgs["--etcd-servers"] = "http://127.0.0.1:2379"
|
||||
case kamajiv1alpha1.EtcdDriver:
|
||||
httpsEndpoints := make([]string, 0, len(d.DataStore.Spec.Endpoints))
|
||||
@@ -727,7 +729,7 @@ func (d Deployment) buildKubeAPIServerCommand(tenantControlPlane kamajiv1alpha1.
|
||||
|
||||
// Order matters, here: extraArgs could try to overwrite some arguments managed by Kamaji and that would be crucial.
|
||||
// Adding as first element of the array of maps, we're sure that these overrides will be sanitized by our configuration.
|
||||
return utilities.MergeMaps(extraArgs, current, desiredArgs)
|
||||
return utilities.MergeMaps(current, desiredArgs, extraArgs)
|
||||
}
|
||||
|
||||
func (d Deployment) secretProjection(secretName, certKeyName, keyName string) *corev1.SecretProjection {
|
||||
@@ -776,7 +778,7 @@ func (d Deployment) buildKineVolume(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: tcp.Status.Storage.Certificate.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
// Adding the volume to read Kine certificates:
|
||||
@@ -803,7 +805,10 @@ func (d Deployment) removeKineContainers(podSpec *corev1.PodSpec) {
|
||||
|
||||
podSpec.Containers = containers
|
||||
}
|
||||
// Removing the kine init-container, if present
|
||||
d.removeKineInitContainers(podSpec)
|
||||
}
|
||||
|
||||
func (d Deployment) removeKineInitContainers(podSpec *corev1.PodSpec) {
|
||||
if found, index := utilities.HasNamedContainer(podSpec.InitContainers, kineInitContainerName); found {
|
||||
var initContainers []corev1.Container
|
||||
|
||||
@@ -821,45 +826,67 @@ func (d Deployment) buildKine(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.Tenant
|
||||
|
||||
return
|
||||
}
|
||||
// Ensuring the init container required for kine is present:
|
||||
// a chmod is required for kine in order to read the certificates to connect to the secured datastore.
|
||||
found, index := utilities.HasNamedContainer(podSpec.InitContainers, kineInitContainerName)
|
||||
if !found {
|
||||
index = len(podSpec.InitContainers)
|
||||
podSpec.InitContainers = append(podSpec.InitContainers, corev1.Container{})
|
||||
|
||||
// Building kine arguments, taking in consideration the user-space ones if provided.
|
||||
args := map[string]string{}
|
||||
|
||||
if d.DataStore.Spec.TLSConfig != nil {
|
||||
// Ensuring the init container required for kine is present:
|
||||
// a chmod is required for kine in order to read the certificates to connect to the secured datastore.
|
||||
found, index := utilities.HasNamedContainer(podSpec.InitContainers, kineInitContainerName)
|
||||
if !found {
|
||||
index = len(podSpec.InitContainers)
|
||||
podSpec.InitContainers = append(podSpec.InitContainers, corev1.Container{})
|
||||
}
|
||||
|
||||
podSpec.InitContainers[index].Name = kineInitContainerName
|
||||
podSpec.InitContainers[index].Image = d.KineContainerImage
|
||||
podSpec.InitContainers[index].Command = []string{"sh"}
|
||||
|
||||
podSpec.InitContainers[index].Args = []string{
|
||||
"-c",
|
||||
"cp /kine/*.* /certs && chmod -R 600 /certs/*.*",
|
||||
}
|
||||
|
||||
podSpec.InitContainers[index].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: dataStoreCertsVolumeName,
|
||||
ReadOnly: true,
|
||||
MountPath: "/kine",
|
||||
},
|
||||
{
|
||||
Name: kineVolumeCertName,
|
||||
MountPath: "/certs",
|
||||
ReadOnly: false,
|
||||
},
|
||||
}
|
||||
|
||||
args["--ca-file"] = "/certs/ca.crt"
|
||||
|
||||
if d.DataStore.Spec.TLSConfig.ClientCertificate != nil {
|
||||
args["--cert-file"] = "/certs/server.crt"
|
||||
args["--key-file"] = "/certs/server.key"
|
||||
}
|
||||
} else {
|
||||
// if no TLS configuration is provided, the kine initContainer must be removed.
|
||||
d.removeKineInitContainers(podSpec)
|
||||
}
|
||||
|
||||
podSpec.InitContainers[index].Name = kineInitContainerName
|
||||
podSpec.InitContainers[index].Image = d.KineContainerImage
|
||||
podSpec.InitContainers[index].Command = []string{"sh"}
|
||||
podSpec.InitContainers[index].Args = []string{
|
||||
"-c",
|
||||
"cp /kine/*.* /certs && chmod -R 600 /certs/*.*",
|
||||
}
|
||||
podSpec.InitContainers[index].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: dataStoreCertsVolumeName,
|
||||
ReadOnly: true,
|
||||
MountPath: "/kine",
|
||||
},
|
||||
{
|
||||
Name: kineVolumeCertName,
|
||||
MountPath: "/certs",
|
||||
ReadOnly: false,
|
||||
},
|
||||
}
|
||||
// Kine is expecting an additional container, and it must be removed before proceeding with the additional one
|
||||
// in order to make this function idempotent.
|
||||
found, index = utilities.HasNamedContainer(podSpec.Containers, kineContainerName)
|
||||
found, index := utilities.HasNamedContainer(podSpec.Containers, kineContainerName)
|
||||
if !found {
|
||||
index = len(podSpec.Containers)
|
||||
podSpec.Containers = append(podSpec.Containers, corev1.Container{})
|
||||
}
|
||||
// Building kine arguments, taking in consideration the user-space ones if provided.
|
||||
args := map[string]string{}
|
||||
|
||||
if tcp.Spec.ControlPlane.Deployment.ExtraArgs != nil {
|
||||
args = utilities.ArgsFromSliceToMap(tcp.Spec.ControlPlane.Deployment.ExtraArgs.Kine)
|
||||
utilArgs := utilities.ArgsFromSliceToMap(tcp.Spec.ControlPlane.Deployment.ExtraArgs.Kine)
|
||||
|
||||
// Merging the user-space arguments with the Kamaji ones.
|
||||
for k, v := range utilArgs {
|
||||
args[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
switch d.DataStore.Spec.Driver {
|
||||
@@ -867,12 +894,10 @@ func (d Deployment) buildKine(podSpec *corev1.PodSpec, tcp kamajiv1alpha1.Tenant
|
||||
args["--endpoint"] = "mysql://$(DB_USER):$(DB_PASSWORD)@tcp($(DB_CONNECTION_STRING))/$(DB_SCHEMA)"
|
||||
case kamajiv1alpha1.KinePostgreSQLDriver:
|
||||
args["--endpoint"] = "postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_CONNECTION_STRING)/$(DB_SCHEMA)"
|
||||
case kamajiv1alpha1.KineNatsDriver:
|
||||
args["--endpoint"] = "nats://$(DB_USER):$(DB_PASSWORD)@$(DB_CONNECTION_STRING)?bucket=$(DB_SCHEMA)&noEmbed"
|
||||
}
|
||||
|
||||
args["--ca-file"] = "/certs/ca.crt"
|
||||
args["--cert-file"] = "/certs/server.crt"
|
||||
args["--key-file"] = "/certs/server.key"
|
||||
|
||||
podSpec.Containers[index].Name = kineContainerName
|
||||
podSpec.Containers[index].Image = d.KineContainerImage
|
||||
podSpec.Containers[index].Command = []string{"/bin/kine"}
|
||||
@@ -933,7 +958,7 @@ func (d Deployment) setReplicas(deploymentSpec *appsv1.DeploymentSpec, tcp kamaj
|
||||
|
||||
func (d Deployment) setRuntimeClass(spec *corev1.PodSpec, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
if len(tcp.Spec.ControlPlane.Deployment.RuntimeClassName) > 0 {
|
||||
spec.RuntimeClassName = pointer.String(tcp.Spec.ControlPlane.Deployment.RuntimeClassName)
|
||||
spec.RuntimeClassName = pointer.To(tcp.Spec.ControlPlane.Deployment.RuntimeClassName)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -999,6 +1024,10 @@ func (d Deployment) setTemplateLabels(template *corev1.PodTemplateSpec, labels m
|
||||
template.SetLabels(labels)
|
||||
}
|
||||
|
||||
func (d Deployment) setTemplateAnnotations(template *corev1.PodTemplateSpec, annotations map[string]string) {
|
||||
template.SetAnnotations(annotations)
|
||||
}
|
||||
|
||||
func (d Deployment) setLabels(resource *appsv1.Deployment, labels map[string]string) {
|
||||
resource.SetLabels(labels)
|
||||
}
|
||||
@@ -1064,3 +1093,13 @@ func (d Deployment) setToleration(spec *corev1.PodSpec, tcp kamajiv1alpha1.Tenan
|
||||
func (d Deployment) setAffinity(spec *corev1.PodSpec, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
spec.Affinity = tcp.Spec.ControlPlane.Deployment.Affinity
|
||||
}
|
||||
|
||||
func (d Deployment) setServiceAccount(spec *corev1.PodSpec, tcp kamajiv1alpha1.TenantControlPlane) {
|
||||
if len(tcp.Spec.ControlPlane.Deployment.ServiceAccountName) > 0 {
|
||||
spec.ServiceAccountName = tcp.Spec.ControlPlane.Deployment.ServiceAccountName
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
spec.ServiceAccountName = "default"
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/pointer"
|
||||
pointer "k8s.io/utils/ptr"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
@@ -186,7 +186,7 @@ func (k Konnectivity) buildVolumeMounts(podSpec *corev1.PodSpec) {
|
||||
|
||||
podSpec.Containers[index].Args = utilities.ArgsFromMapToSlice(args)
|
||||
|
||||
vFound, vIndex := false, 0
|
||||
vFound, vIndex := false, 0 //nolint:wastedassign
|
||||
// Patching the volume mounts
|
||||
if vFound, vIndex = utilities.HasNamedVolumeMount(podSpec.Containers[index].VolumeMounts, konnectivityUDSVolume); !vFound {
|
||||
vIndex = len(podSpec.Containers[index].VolumeMounts)
|
||||
@@ -208,9 +208,8 @@ func (k Konnectivity) buildVolumeMounts(podSpec *corev1.PodSpec) {
|
||||
}
|
||||
|
||||
func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, podSpec *corev1.PodSpec) {
|
||||
found, index := false, 0
|
||||
// Defining volumes for the UDS socket
|
||||
found, index = utilities.HasNamedVolume(podSpec.Volumes, konnectivityUDSVolume)
|
||||
found, index := utilities.HasNamedVolume(podSpec.Volumes, konnectivityUDSVolume)
|
||||
if !found {
|
||||
index = len(podSpec.Volumes)
|
||||
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{})
|
||||
@@ -235,7 +234,7 @@ func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, pod
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: status.ConfigMap.Name,
|
||||
},
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
// Defining volume for the Konnectivity kubeconfig
|
||||
@@ -249,7 +248,7 @@ func (k Konnectivity) buildVolumes(status kamajiv1alpha1.KonnectivityStatus, pod
|
||||
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: status.Kubeconfig.SecretName,
|
||||
DefaultMode: pointer.Int32(420),
|
||||
DefaultMode: pointer.To(int32(420)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,18 +21,26 @@ func NewStorageConnection(ctx context.Context, client client.Client, ds kamajiv1
|
||||
|
||||
switch ds.Spec.Driver {
|
||||
case kamajiv1alpha1.KineMySQLDriver:
|
||||
cc.TLSConfig.ServerName = cc.Endpoints[0].Host
|
||||
|
||||
if ds.Spec.TLSConfig != nil {
|
||||
cc.TLSConfig.ServerName = cc.Endpoints[0].Host
|
||||
}
|
||||
|
||||
cc.Parameters = map[string][]string{
|
||||
"multiStatements": {"true"},
|
||||
}
|
||||
|
||||
return NewMySQLConnection(*cc)
|
||||
case kamajiv1alpha1.KinePostgreSQLDriver:
|
||||
cc.TLSConfig.ServerName = cc.Endpoints[0].Host
|
||||
//nolint:contextcheck
|
||||
if ds.Spec.TLSConfig != nil {
|
||||
cc.TLSConfig.ServerName = cc.Endpoints[0].Host
|
||||
}
|
||||
|
||||
return NewPostgreSQLConnection(*cc)
|
||||
case kamajiv1alpha1.EtcdDriver:
|
||||
return NewETCDConnection(*cc)
|
||||
case kamajiv1alpha1.KineNatsDriver:
|
||||
return NewNATSConnection(*cc)
|
||||
default:
|
||||
return nil, fmt.Errorf("%s is not a valid driver", ds.Spec.Driver)
|
||||
}
|
||||
|
||||
@@ -36,29 +36,41 @@ type ConnectionConfig struct {
|
||||
}
|
||||
|
||||
func NewConnectionConfig(ctx context.Context, client client.Client, ds kamajiv1alpha1.DataStore) (*ConnectionConfig, error) {
|
||||
ca, err := ds.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var tlsConfig *tls.Config
|
||||
|
||||
if ds.Spec.TLSConfig != nil {
|
||||
ca, err := ds.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rootCAs := x509.NewCertPool()
|
||||
if ok := rootCAs.AppendCertsFromPEM(ca); !ok {
|
||||
return nil, fmt.Errorf("error create root CA for the DB connector")
|
||||
}
|
||||
|
||||
tlsConfig = &tls.Config{
|
||||
RootCAs: rootCAs,
|
||||
}
|
||||
}
|
||||
|
||||
crt, err := ds.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ds.Spec.TLSConfig != nil && ds.Spec.TLSConfig.ClientCertificate != nil {
|
||||
crt, err := ds.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key, err := ds.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key, err := ds.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rootCAs := x509.NewCertPool()
|
||||
if ok := rootCAs.AppendCertsFromPEM(ca); !ok {
|
||||
return nil, fmt.Errorf("error create root CA for the DB connector")
|
||||
}
|
||||
certificate, err := tls.X509KeyPair(crt, key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot retrieve x.509 key pair from the Kine Secret")
|
||||
}
|
||||
|
||||
certificate, err := tls.X509KeyPair(crt, key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot retrieve x.509 key pair from the Kine Secret")
|
||||
tlsConfig.Certificates = []tls.Certificate{certificate}
|
||||
}
|
||||
|
||||
var user, password string
|
||||
@@ -99,10 +111,7 @@ func NewConnectionConfig(ctx context.Context, client client.Client, ds kamajiv1a
|
||||
User: user,
|
||||
Password: password,
|
||||
Endpoints: eps,
|
||||
TLSConfig: &tls.Config{
|
||||
RootCAs: rootCAs,
|
||||
Certificates: []tls.Certificate{certificate},
|
||||
},
|
||||
TLSConfig: tlsConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -136,7 +136,7 @@ func (e *EtcdClient) DeleteDB(ctx context.Context, dbName string) error {
return nil
}

func (e *EtcdClient) RevokePrivileges(ctx context.Context, user, dbName string) error {
func (e *EtcdClient) RevokePrivileges(ctx context.Context, _, dbName string) error {
if _, err := e.Client.Auth.RoleDelete(ctx, dbName); err != nil {
return errors.NewRevokePrivilegesError(err)
}

@@ -112,12 +112,14 @@ func NewMySQLConnection(config ConnectionConfig) (Connection, error) {

tlsKey := "mysql"

if err = mysql.RegisterTLSConfig(tlsKey, config.TLSConfig); err != nil {
return nil, err
if config.TLSConfig != nil {
if err = mysql.RegisterTLSConfig(tlsKey, config.TLSConfig); err != nil {
return nil, err
}
mysqlConfig.TLSConfig = tlsKey
}

mysqlConfig.DBName = config.DBName
mysqlConfig.TLSConfig = tlsKey
parsedDSN := mysqlConfig.FormatDSN()

db, err := sql.Open("mysql", parsedDSN)
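For MySQL, the custom TLS configuration is now registered with the driver only when one is actually present. A minimal sketch of that pattern with go-sql-driver/mysql follows; the openMySQL helper and the placeholder address, credentials, and database name are illustrative assumptions, not the repository's code.

package main

import (
	"crypto/tls"
	"database/sql"
	"fmt"

	"github.com/go-sql-driver/mysql"
)

// openMySQL registers a named TLS config only when tlsCfg is non-nil;
// otherwise the generated DSN carries no tls parameter at all.
func openMySQL(addr, user, pass, dbName string, tlsCfg *tls.Config) (*sql.DB, error) {
	cfg := mysql.NewConfig()
	cfg.Net = "tcp"
	cfg.Addr = addr
	cfg.User = user
	cfg.Passwd = pass
	cfg.DBName = dbName

	if tlsCfg != nil {
		if err := mysql.RegisterTLSConfig("custom", tlsCfg); err != nil {
			return nil, err
		}
		cfg.TLSConfig = "custom"
	}

	return sql.Open("mysql", cfg.FormatDSN())
}

func main() {
	db, err := openMySQL("127.0.0.1:3306", "kamaji", "password", "tenant", nil)
	fmt.Println(db != nil, err) // sql.Open is lazy: no network dial happens here
}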
@@ -216,13 +218,17 @@ func (c *MySQLConnection) DBExists(ctx context.Context, dbName string) (bool, er
return ok, nil
}

func (c *MySQLConnection) GrantPrivilegesExists(ctx context.Context, user, dbName string) (bool, error) {
func (c *MySQLConnection) GrantPrivilegesExists(_ context.Context, user, dbName string) (bool, error) {
statementShowGrantsStatement := fmt.Sprintf(mysqlShowGrantsStatement, user)
rows, err := c.db.Query(statementShowGrantsStatement)
rows, err := c.db.Query(statementShowGrantsStatement) //nolint:sqlclosecheck
if err != nil {
return false, errors.NewGrantPrivilegesError(err)
}

if err = rows.Err(); err != nil {
return false, errors.NewGrantPrivilegesError(err)
}

expected := fmt.Sprintf(mysqlGrantPrivilegesStatement, user, dbName)
var grant string

184
internal/datastore/nats.go
Normal file
@@ -0,0 +1,184 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package datastore

import (
	"context"
	"fmt"
	"strings"

	"github.com/nats-io/nats.go"
	"github.com/pkg/errors"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

// NATSConnection represents a connection to a NATS KV store.
type NATSConnection struct {
	js     nats.JetStreamContext
	conn   *nats.Conn
	config ConnectionConfig
}

// NewNATSConnection initializes a connection to NATS and sets up the KV store.
func NewNATSConnection(config ConnectionConfig) (*NATSConnection, error) {
	var endpoints string

	if len(config.Endpoints) > 1 {
		// comma separated list of endpoints
		var ep []string
		for _, e := range config.Endpoints {
			ep = append(ep, fmt.Sprintf("nats://%s", e.String()))
		}

		endpoints = strings.Join(ep, ",")
	} else {
		endpoints = fmt.Sprintf("nats://%s", config.Endpoints[0].String())
	}

	var conn *nats.Conn
	var err error
	var natsOpts []nats.Option

	if config.TLSConfig != nil {
		natsOpts = append(natsOpts, nats.Secure(config.TLSConfig))
	}

	if config.User != "" && config.Password != "" {
		natsOpts = append(natsOpts, nats.UserInfo(config.User, config.Password))
	}

	conn, err = nats.Connect(endpoints, natsOpts...)
	if err != nil {
		return nil, err
	}

	js, err := conn.JetStream()
	if err != nil {
		return nil, err
	}

	return &NATSConnection{
		js:     js,
		conn:   conn,
		config: config,
	}, nil
}

func (nc *NATSConnection) CreateUser(_ context.Context, _, _ string) error {
	return nil
}

func (nc *NATSConnection) CreateDB(_ context.Context, dbName string) error {
	_, err := nc.js.CreateKeyValue(&nats.KeyValueConfig{Bucket: dbName})
	if err != nil {
		return errors.Wrap(err, "unable to create the datastore")
	}

	return nil
}

func (nc *NATSConnection) GrantPrivileges(_ context.Context, _, _ string) error {
	return nil
}

func (nc *NATSConnection) UserExists(_ context.Context, _ string) (bool, error) {
	return true, nil
}

func (nc *NATSConnection) DBExists(_ context.Context, dbName string) (bool, error) {
	_, err := nc.js.KeyValue(dbName)
	if err != nil {
		if errors.Is(err, nats.ErrBucketNotFound) {
			return false, nil
		}

		return false, err
	}

	return true, nil
}

func (nc *NATSConnection) GrantPrivilegesExists(_ context.Context, _, _ string) (bool, error) {
	return true, nil
}

func (nc *NATSConnection) DeleteUser(_ context.Context, _ string) error {
	return nil
}

func (nc *NATSConnection) DeleteDB(_ context.Context, dbName string) error {
	err := nc.js.DeleteKeyValue(dbName)

	return err
}

func (nc *NATSConnection) RevokePrivileges(_ context.Context, _, _ string) error {
	return nil
}

func (nc *NATSConnection) GetConnectionString() string {
	return nc.config.Endpoints[0].String()
}

func (nc *NATSConnection) Close() error {
	return nc.conn.Drain()
}

func (nc *NATSConnection) Check(_ context.Context) error {
	status := nc.conn.Status()

	if status != nats.CONNECTED {
		return errors.New("connection to NATS is not established")
	}

	return nil
}

func (nc *NATSConnection) Driver() string {
	return string(kamajiv1alpha1.KineNatsDriver)
}

func (nc *NATSConnection) GetConfig() ConnectionConfig {
	return nc.config
}

func (nc *NATSConnection) Migrate(ctx context.Context, tcp kamajiv1alpha1.TenantControlPlane, target Connection) error {
	targetClient := target.(*NATSConnection) //nolint:forcetypeassert
	dbName := tcp.Status.Storage.Setup.Schema

	targetKv, err := targetClient.js.KeyValue(dbName)
	if err != nil {
		return err
	}

	sourceKv, err := nc.js.KeyValue(dbName)
	if err != nil {
		return err
	}

	if err := target.Check(ctx); err != nil {
		return err
	}

	// copy all keys from source to target
	keys, err := sourceKv.Keys()
	if err != nil {
		return err
	}

	for _, key := range keys {
		entry, err := sourceKv.Get(key)
		if err != nil {
			return err
		}

		_, err = targetKv.Put(key, entry.Value())
		if err != nil {
			return err
		}
	}

	return nil
}
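The driver above stores each Tenant Control Plane in a JetStream key-value bucket and migrates data by copying keys bucket to bucket. The standalone sketch below exercises the same JetStream KV mechanism directly with the nats.go client; the endpoint URL and the "tenant-00" bucket name are placeholders, and this is an illustration rather than code from the repository.

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Placeholder endpoint; a real deployment would use the DataStore endpoints.
	conn, err := nats.Connect("nats://127.0.0.1:4222")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Drain() //nolint:errcheck

	js, err := conn.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Same call the driver uses in CreateDB: one bucket per tenant schema.
	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "tenant-00"})
	if err != nil {
		log.Fatal(err)
	}

	if _, err := kv.Put("example", []byte("value")); err != nil {
		log.Fatal(err)
	}

	keys, err := kv.Keys()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("keys in bucket:", keys)
}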
@@ -10,6 +10,7 @@ import (
"strings"

"github.com/go-pg/pg/v10"
goerrors "github.com/pkg/errors"

kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/datastore/errors"
@@ -34,6 +35,7 @@ const (
type PostgreSQLConnection struct {
db *pg.DB
connection ConnectionEndpoint
rootUser string
switchDatabaseFn func(dbName string) *pg.DB
}

@@ -78,7 +80,7 @@ func (r *PostgreSQLConnection) Migrate(ctx context.Context, tcp kamajiv1alpha1.T
// Dumping the old datastore in a local buffer
var buf bytes.Buffer

if _, err := r.switchDatabaseFn(tcp.Status.Storage.Setup.Schema).WithContext(ctx).CopyTo(&buf, "COPY kine TO STDOUT"); err != nil { //nolint:contextcheck
if _, err := r.switchDatabaseFn(tcp.Status.Storage.Setup.Schema).WithContext(ctx).CopyTo(&buf, "COPY kine TO STDOUT"); err != nil {
return fmt.Errorf("unable to copy from the origin datastore: %w", err)
}

@@ -114,6 +116,7 @@ func NewPostgreSQLConnection(config ConnectionConfig) (Connection, error) {
return &PostgreSQLConnection{
db: pg.Connect(opt),
switchDatabaseFn: fn,
rootUser: config.User,
connection: config.Endpoints[0],
}, nil
}
@@ -232,6 +235,10 @@ func (r *PostgreSQLConnection) DeleteUser(ctx context.Context, user string) erro
}

func (r *PostgreSQLConnection) DeleteDB(ctx context.Context, dbName string) error {
if err := r.GrantPrivileges(ctx, r.rootUser, dbName); err != nil {
return errors.NewCannotDeleteDatabaseError(goerrors.Wrap(err, "cannot grant privileges to root user"))
}

if _, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlDropDBStatement, dbName)); err != nil {
return errors.NewCannotDeleteDatabaseError(err)
}

@@ -107,7 +107,7 @@ func GeneratePublicKeyPrivateKeyPair(baseName string, config *Configuration) (*P
}

func initPhaseCertsSA(config *Configuration) error {
return certs.CreateServiceAccountKeyAndPublicKeyFiles(config.InitConfiguration.CertificatesDir, config.InitConfiguration.PublicKeyAlgorithm())
return certs.CreateServiceAccountKeyAndPublicKeyFiles(config.InitConfiguration.CertificatesDir, config.InitConfiguration.EncryptionAlgorithmType())
}

func initPhaseFromCA(kubeadmCert *certs.KubeadmCert, config *Configuration, certificate *x509.Certificate, signer crypto.Signer) error {

@@ -64,9 +64,9 @@ func CreateKubeadmInitConfiguration(params Parameters) (*Configuration, error) {
fmt.Sprintf("%s.%s.svc.cluster.local", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
params.TenantControlPlaneAddress,
}, params.TenantControlPlaneCertSANs...)
conf.APIServer.ControlPlaneComponent.ExtraArgs = map[string]string{
"etcd-compaction-interval": "0s",
"etcd-prefix": fmt.Sprintf("/%s", params.TenantControlPlaneName),
conf.APIServer.ControlPlaneComponent.ExtraArgs = []kubeadmapi.Arg{
{Name: "etcd-compaction-interval", Value: "0s"},
{Name: "etcd-prefix", Value: fmt.Sprintf("/%s", params.TenantControlPlaneName)},
}
conf.ClusterName = params.TenantControlPlaneName

@@ -26,11 +26,8 @@ func buildCertificateDirectoryWithCA(ca CertificatePrivateKeyPair, directory str
}

keyPath := path.Join(directory, kubeadmconstants.CAKeyName)
if err := os.WriteFile(keyPath, ca.PrivateKey, os.FileMode(0o600)); err != nil {
return err
}

return nil
return os.WriteFile(keyPath, ca.PrivateKey, os.FileMode(0o600))
}

func CreateKubeconfig(kubeconfigName string, ca CertificatePrivateKeyPair, config *Configuration) ([]byte, error) {

@@ -11,28 +11,28 @@ import (

type Discard struct{}

func (d Discard) PrintObj(obj runtime.Object, writer io.Writer) error {
func (d Discard) PrintObj(runtime.Object, io.Writer) error {
return nil
}

func (d Discard) Fprintf(writer io.Writer, format string, args ...interface{}) (n int, err error) {
func (d Discard) Fprintf(io.Writer, string, ...interface{}) (n int, err error) {
return
}

func (d Discard) Fprintln(writer io.Writer, args ...interface{}) (n int, err error) {
func (d Discard) Fprintln(io.Writer, ...interface{}) (n int, err error) {
return
}

func (d Discard) Printf(format string, args ...interface{}) (n int, err error) {
func (d Discard) Printf(string, ...interface{}) (n int, err error) {
return
}

func (d Discard) Println(args ...interface{}) (n int, err error) {
func (d Discard) Println(...interface{}) (n int, err error) {
return
}

func (d Discard) Flush(writer io.Writer, last bool) {
func (d Discard) Flush(io.Writer, bool) {
}

func (d Discard) Close(writer io.Writer) {
func (d Discard) Close(io.Writer) {
}

@@ -17,7 +17,7 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"

"github.com/clastix/kamaji/internal/utilities"
)
@@ -81,10 +81,10 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
},
Authentication: kubelettypes.KubeletAuthentication{
Anonymous: kubelettypes.KubeletAnonymousAuthentication{
Enabled: pointer.Bool(false),
Enabled: pointer.To(false),
},
Webhook: kubelettypes.KubeletWebhookAuthentication{
Enabled: pointer.Bool(true),
Enabled: pointer.To(true),
CacheTTL: zeroDuration,
},
X509: kubelettypes.KubeletX509Authentication{
@@ -110,9 +110,9 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
EvictionPressureTransitionPeriod: zeroDuration,
FileCheckFrequency: zeroDuration,
HealthzBindAddress: "127.0.0.1",
HealthzPort: pointer.Int32(10248),
HealthzPort: pointer.To(int32(10248)),
HTTPCheckFrequency: zeroDuration,
ImageGCHighThresholdPercent: pointer.Int32(100),
ImageGCHighThresholdPercent: pointer.To(int32(100)),
NodeStatusUpdateFrequency: zeroDuration,
NodeStatusReportFrequency: zeroDuration,
RotateCertificates: true,

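These hunks swap the deprecated typed helpers from k8s.io/utils/pointer for the generic k8s.io/utils/ptr package, imported here under the old "pointer" alias. A minimal sketch of the replacement pattern (the values are placeholders, not taken from the repository):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To replaces the per-type helpers such as pointer.Bool and pointer.Int32.
	enabled := ptr.To(true)      // *bool
	port := ptr.To(int32(10248)) // *int32

	// ptr.Deref reads a value back, falling back to a default for nil pointers.
	fmt.Println(ptr.Deref(enabled, false), ptr.Deref(port, 0))
}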
@@ -13,7 +13,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
pointer "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -287,7 +287,7 @@ func (k *KubeProxy) mutateDaemonSet(ctx context.Context, tenantClient client.Cli
ds.Spec.Template.Spec.Volumes[0].Name = k.daemonSet.Spec.Template.Spec.Volumes[0].Name
ds.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap = &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: k.daemonSet.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.Name},
DefaultMode: pointer.Int32(420),
DefaultMode: pointer.To(int32(420)),
}

ds.Spec.Template.Spec.Volumes[1].Name = k.daemonSet.Spec.Template.Spec.Volumes[1].Name

@@ -105,7 +105,7 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
}

if isValid {
return nil
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}

@@ -80,79 +80,89 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())

ca, err := r.DataStore.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "cannot retrieve CA certificate content")
if r.DataStore.Spec.TLSConfig != nil {
ca, err := r.DataStore.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "cannot retrieve CA certificate content")

return err
}
return err
}

r.resource.Data["ca.crt"] = ca
r.resource.Data["ca.crt"] = ca

r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()),
map[string]string{
constants.ControllerLabelResource: "x509",
},
))
r.resource.SetLabels(utilities.MergeMaps(
utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName()),
map[string]string{
constants.ControllerLabelResource: "x509",
},
))

if err = ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
logger.Error(err, "cannot set controller reference", "resource", r.GetName())
if err = ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme()); err != nil {
logger.Error(err, "cannot set controller reference", "resource", r.GetName())

return err
}
return err
}

if utilities.GetObjectChecksum(r.resource) == utilities.CalculateMapChecksum(r.resource.Data) {
if r.DataStore.Spec.Driver == kamajiv1alpha1.EtcdDriver {
if isValid, _ := crypto.IsValidCertificateKeyPairBytes(r.resource.Data["server.crt"], r.resource.Data["server.key"]); isValid {
return nil
if utilities.GetObjectChecksum(r.resource) == utilities.CalculateMapChecksum(r.resource.Data) {
if r.DataStore.Spec.Driver == kamajiv1alpha1.EtcdDriver {
if isValid, _ := crypto.IsValidCertificateKeyPairBytes(r.resource.Data["server.crt"], r.resource.Data["server.key"]); isValid {
return nil
}
}
}

var crt, key *bytes.Buffer

switch r.DataStore.Spec.Driver {
case kamajiv1alpha1.EtcdDriver:
var privateKey []byte
// When dealing with the etcd storage we cannot use the basic authentication, thus the generation of a
// certificate used for authentication is mandatory, along with the CA private key.
if privateKey, err = r.DataStore.Spec.TLSConfig.CertificateAuthority.PrivateKey.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve CA private key content")

return err
}

if crt, key, err = crypto.GenerateCertificatePrivateKeyPair(crypto.NewCertificateTemplate(tenantControlPlane.Status.Storage.Setup.User), ca, privateKey); err != nil {
logger.Error(err, "unable to generate certificate and private key")

return err
}
case kamajiv1alpha1.KineMySQLDriver, kamajiv1alpha1.KinePostgreSQLDriver, kamajiv1alpha1.KineNatsDriver:
var crtBytes, keyBytes []byte
// For the SQL drivers we just need to copy the certificate, since the basic authentication is used
// to connect to the desired schema and database.

if r.DataStore.Spec.TLSConfig.ClientCertificate != nil {
if crtBytes, err = r.DataStore.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve certificate content")

return err
}

crt = bytes.NewBuffer(crtBytes)

if keyBytes, err = r.DataStore.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve private key content")

return err
}
key = bytes.NewBuffer(keyBytes)
}
default:
return fmt.Errorf("unrecognized driver for Certificate generation")
}

if r.DataStore.Spec.TLSConfig.ClientCertificate != nil {
r.resource.Data["server.crt"] = crt.Bytes()
r.resource.Data["server.key"] = key.Bytes()
}
} else {
// set r.resource.Data to empty to allow switching from TLS to non-tls
r.resource.Data = map[string][]byte{}
}

var crt, key *bytes.Buffer

switch r.DataStore.Spec.Driver {
case kamajiv1alpha1.EtcdDriver:
var privateKey []byte
// When dealing with the etcd storage we cannot use the basic authentication, thus the generation of a
// certificate used for authentication is mandatory, along with the CA private key.
if privateKey, err = r.DataStore.Spec.TLSConfig.CertificateAuthority.PrivateKey.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve CA private key content")

return err
}

if crt, key, err = crypto.GenerateCertificatePrivateKeyPair(crypto.NewCertificateTemplate(tenantControlPlane.Status.Storage.Setup.User), ca, privateKey); err != nil {
logger.Error(err, "unable to generate certificate and private key")

return err
}
case kamajiv1alpha1.KineMySQLDriver, kamajiv1alpha1.KinePostgreSQLDriver:
var crtBytes, keyBytes []byte
// For the SQL drivers we just need to copy the certificate, since the basic authentication is used
// to connect to the desired schema and database.
if crtBytes, err = r.DataStore.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve certificate content")

return err
}

crt = bytes.NewBuffer(crtBytes)

if keyBytes, err = r.DataStore.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, r.Client); err != nil {
logger.Error(err, "unable to retrieve private key content")

return err
}
key = bytes.NewBuffer(keyBytes)
default:
return fmt.Errorf("unrecognized driver for Certificate generation")
}

r.resource.Data["server.crt"] = crt.Bytes()
r.resource.Data["server.key"] = key.Bytes()

utilities.SetObjectChecksum(r.resource, r.resource.Data)

return nil

@@ -43,7 +43,7 @@ func (d *Migrate) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1

d.job = &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("migrate-%s-%s", tenantControlPlane.GetNamespace(), tenantControlPlane.GetName()),
Name: fmt.Sprintf("migrate-%s", tenantControlPlane.UID),
Namespace: d.KamajiNamespace,
},
}
@@ -64,11 +64,8 @@ func (d *Migrate) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1
}

d.desiredDatastore = &kamajiv1alpha1.DataStore{}
if err := d.Client.Get(ctx, types.NamespacedName{Name: tenantControlPlane.Spec.DataStore}, d.desiredDatastore); err != nil {
return err
}

return nil
return d.Client.Get(ctx, types.NamespacedName{Name: tenantControlPlane.Spec.DataStore}, d.desiredDatastore)
}

func (d *Migrate) ShouldCleanup(tcp *kamajiv1alpha1.TenantControlPlane) bool {

Some files were not shown because too many files have changed in this diff.