Compare commits


76 Commits

Author SHA1 Message Date
Dario Tranchitella
78ef34c9d6 fix(docs): aligning to latest changes for the chart documentation 2022-11-19 11:07:37 +01:00
Matteo Ruina
16d8b2d701 fix(helm): support installation on EKS 2022-11-18 16:50:00 +01:00
Dario Tranchitella
68764be716 chore(helm): support installation using --wait option 2022-10-22 09:47:08 +02:00
Dario Tranchitella
b594b598b1 chore(helm)!: tcp pod advanced scheduling 2022-10-21 14:39:24 +02:00
Dario Tranchitella
c8ce212730 chore(kustomize): tcp pod advanced scheduling 2022-10-21 14:39:24 +02:00
Dario Tranchitella
714b173132 docs: tcp pod advanced scheduling 2022-10-21 14:39:24 +02:00
Dario Tranchitella
0217d579d6 feat: tcp pod advanced scheduling 2022-10-21 14:39:24 +02:00
Dario Tranchitella
c242f4ac58 api!: tcp pod advanced scheduling 2022-10-21 14:39:24 +02:00
Dario Tranchitella
d4d25a8a05 chore(makefile): golint recipe 2022-10-21 14:39:24 +02:00
maxgio92
cff7f7c4e5 Refactor documentation and provide a website (#173) 2022-10-20 09:57:54 +02:00
Dario Tranchitella
6c817fd7ab fix(helm): kubeversion constraint 2022-10-12 11:27:45 +02:00
Massimiliano Giovagnoli
d31ada4da6 docs: add link to env file for admin cluster setup
Signed-off-by: Massimiliano Giovagnoli <me@maxgio.it>
2022-10-12 10:20:13 +02:00
Massimiliano Giovagnoli
ee01f721d2 docs: add link script for joining nodes setup
Signed-off-by: Massimiliano Giovagnoli <me@maxgio.it>
2022-10-11 17:48:06 +02:00
bsctl
912e010363 docs: add cncf conformance logo 2022-10-06 10:18:18 +02:00
bsctl
e2b03ca873 docs: add cncf conformance logo 2022-10-06 10:18:18 +02:00
Adriano Pezzuto
dccf7bd540 chore(helm): update metadata to helm chart 2022-10-05 09:47:37 +02:00
bsctl
25a65a7496 fix(docs): add logo in svg format 2022-09-23 19:32:16 +02:00
Dario Tranchitella
1ff03246c6 chore(helm): bumping to v0.1.0 2022-09-19 11:43:51 +02:00
Dario Tranchitella
8335f645a5 chore(kustomize): bumping to v0.1.0 2022-09-19 11:43:51 +02:00
Dario Tranchitella
70a791be74 chore(makefile): bumping to v0.1.0 2022-09-19 11:43:51 +02:00
bsctl
b0293c23b5 fix(docs): minor improvement 2022-09-16 20:36:49 +02:00
bsctl
50bba9bb2e fix(docs): deploy tenant nodes on separate subnet 2022-09-16 20:36:49 +02:00
bsctl
f05f7eaf07 fix(docs): remove outdated manifests 2022-09-16 20:36:49 +02:00
bsctl
bfd34ef47e fix(docs): minor improvements 2022-09-16 20:36:49 +02:00
bsctl
b73c7a20ed fix(docs): update roadmap in readme 2022-09-16 20:36:49 +02:00
bsctl
004441e77e fix(docs): use default md style for api reference 2022-09-16 20:36:49 +02:00
bsctl
0f85b6c534 fix(docs): wrong links in readme 2022-09-16 20:36:49 +02:00
bsctl
b674738f0d fix(docs): pin always the kubeadm versions 2022-09-16 20:36:49 +02:00
bsctl
6dc3cd1876 fix(docs): set requirements on kubeadm version 2022-09-16 20:36:49 +02:00
bsctl
96a57fefa5 refactor(docs): track new features and improvements 2022-09-16 20:36:49 +02:00
Dario Tranchitella
87b6f75f66 chore(ci): check helm non committed changes 2022-09-14 11:23:11 +02:00
Dario Tranchitella
1b24806fa3 fix(helm): protocol is not required for external etcd endpoints 2022-09-14 11:23:11 +02:00
Dario Tranchitella
f32ba4a76b fix(makefile): missing namespaces for postgresql kine setup 2022-09-14 11:23:11 +02:00
Dario Tranchitella
19d91aa4d2 chore(log): silencing klog 2022-09-14 11:23:11 +02:00
Dario Tranchitella
a4e2ac24ac fix(kustomize): installing default datastore with proper endpoints 2022-09-14 11:23:11 +02:00
Dario Tranchitella
0dffd9ba46 fix(datastore): default as name for the common datastore 2022-09-14 11:23:11 +02:00
Dario Tranchitella
90b2ca1bab fix(konnectivity): clean-up upon toggling addon
The kube-apiserver container of the TCP Deployment is heavily patched with
extra settings for Konnectivity: most of them weren't cleaned up properly,
and the function wasn't fully idempotent when toggling the feature.

This fix addresses the situation and rearranges the code according to the
latest polish.
2022-09-12 09:38:36 +02:00
Dario Tranchitella
df8ca7c1d1 refactor: checksum for configmap and secret data 2022-09-12 09:38:36 +02:00
Dario Tranchitella
65519d4f22 refactor: using kamaji prefix for checksum annotation 2022-09-12 09:38:36 +02:00
Dario Tranchitella
e0fa8169f1 refactor: wrapping datastore errors 2022-09-12 09:38:36 +02:00
Dario Tranchitella
41eddc0462 refactor(crypto): eliminating bloated certs functions 2022-09-12 09:38:36 +02:00
Dario Tranchitella
1a9a8a1854 refactor: decoding kubeconfig with less bloated funcs 2022-09-12 09:38:36 +02:00
Dario Tranchitella
0c8a16d604 refactor(utils): encode to yaml uses the non deprecated serializer 2022-09-12 09:38:36 +02:00
Dario Tranchitella
b7adb314ad refactor: logging errors with stacktrace
By using the log facade and logging the error directly in the resource
handler, we get a more detailed overview of the errors, along with other
metadata that helps pinpoint more quickly where the reconciliation failed.
2022-09-12 09:38:36 +02:00
Dario Tranchitella
e55e6cfdd4 chore(golangci-lint): enabling interfacer and updating code 2022-09-12 09:38:36 +02:00
Dario Tranchitella
6388bf0a7f chore(golangci-lint): enabling used linters 2022-09-12 09:38:36 +02:00
Dario Tranchitella
e089f0ad9a chore: pointer.Int32Ptr is deprecated in favor of pointer.Int32 2022-09-12 09:38:36 +02:00
Dario Tranchitella
0b0bf09813 feat: seeding at startup 2022-09-12 09:38:36 +02:00
Dario Tranchitella
00ea4a562d refactor: moving cert functions to datastore resource 2022-09-12 09:38:36 +02:00
Dario Tranchitella
2a33844c68 refactor(utilities): decreasing bloating functions 2022-09-12 09:38:36 +02:00
Dario Tranchitella
606926ec9a refactor: go simple kubeconfig check 2022-09-12 09:38:36 +02:00
Dario Tranchitella
84b70b3b59 fix: check service-account certificate hash for reconciliation 2022-09-12 09:38:36 +02:00
Dario Tranchitella
4ca79ceb4c fix(helm)!: wrong path for scale spec path 2022-09-10 09:54:12 +02:00
Dario Tranchitella
8df8aa445a fix(kustomize)!: wrong path for scale spec path 2022-09-10 09:54:12 +02:00
Dario Tranchitella
8da916b5cd fix: wrong path for scale spec path 2022-09-10 09:54:12 +02:00
Dario Tranchitella
f15eeebe02 chore(gh): ensure to use go 1.18 for golangci-lint 2022-09-09 17:00:20 +02:00
Dario Tranchitella
7002d48ef9 fix(upgrade): minor release upgrades are allowed 2022-09-09 17:00:20 +02:00
Dario Tranchitella
79edd2606a refactor(kubeadm)!: updating code according to latest changes
Starting from this change, all the nodes trying to join a Kamaji TCP
must be initialized with kubeadm >= 1.25. This is not a hard prerequisite,
since a previous Kubernetes version can still be used by specifying it in
the ClusterConfiguration kubernetesVersion field.
2022-09-09 17:00:20 +02:00
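As a sketch of the note above (assuming a standard kubeadm configuration file; the version value is purely illustrative), pinning an older Kubernetes version would look like:

apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
# Hypothetical example: pin the control plane to a pre-1.25 version
# even though the kubeadm binary itself is >= 1.25.
kubernetesVersion: v1.24.6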
Dario Tranchitella
650c20be2b fix(deps): upgrading kubeadm to 1.25.0 2022-09-09 17:00:20 +02:00
Dario Tranchitella
7862717772 refactor: using constants for front-proxy common name 2022-09-09 17:00:10 +02:00
Dario Tranchitella
08eed7b244 fix: --etcd-compaction-interval flag is required for TCP API Server 2022-09-09 17:00:10 +02:00
Dario Tranchitella
1a561758b6 fix: service account issuer must be kubernetes.default.svc 2022-09-09 09:11:43 +02:00
Dario Tranchitella
12f12832f7 fix(kube-apiserver): required flag requestheader-client-ca-file 2022-09-06 19:20:40 +02:00
Dario Tranchitella
b4d0f9b698 chore(helm): adding scale subresource 2022-09-06 16:31:42 +02:00
Dario Tranchitella
14624af093 chore(kustomize)!: adding scale subresource 2022-09-06 16:31:42 +02:00
Dario Tranchitella
52cdc90b48 feat: adding scale subresource 2022-09-06 16:31:42 +02:00
Dario Tranchitella
fbb6e4eec5 chore(helm)!: repository and version override for addons 2022-09-02 14:38:46 +02:00
Dario Tranchitella
880a29f543 chore(kustomize)!: repository and version override for addons 2022-09-02 14:38:46 +02:00
Dario Tranchitella
b0b4ef95c6 feat: repository and version override for addons 2022-09-02 14:38:46 +02:00
Dario Tranchitella
bd909d6567 refactor(docs): updating repository and tag for konnectivity addon 2022-08-31 23:36:58 +02:00
Dario Tranchitella
fcc10c95b2 chore(helm): updating repository and tag 2022-08-31 23:36:58 +02:00
Dario Tranchitella
7e912ed2e8 chore(kustomize): updating repository and tag 2022-08-31 23:36:58 +02:00
Dario Tranchitella
2374176faf refactor(konnectivity): updating repository and tag 2022-08-31 23:36:58 +02:00
Dario Tranchitella
aceeced53a chore(helm)!: support for topology spread constraints 2022-08-31 23:35:54 +02:00
Dario Tranchitella
53c9102ef3 chore(kustomize)!: support for topology spread constraints 2022-08-31 23:35:54 +02:00
Dario Tranchitella
15e1cf7d80 feat: support for topology spread constraints 2022-08-31 23:35:54 +02:00
120 changed files with 10926 additions and 7023 deletions

View File

@@ -12,11 +12,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.18'
check-latest: true
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
uses: golangci/golangci-lint-action@v3.2.0
with:
version: v1.49.0
only-new-issues: false
@@ -28,9 +29,10 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.18'
check-latest: true
- run: make yaml-installation-file
- name: Checking if YAML installer file is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi

View File

@@ -34,9 +34,10 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.18'
check-latest: true
- run: |
sudo apt-get update
sudo apt-get install -y golang-cfssl

View File

@@ -8,6 +8,16 @@ on:
branches: [ "*" ]
jobs:
diff:
name: diff
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- run: make -C charts/kamaji docs
- name: Checking if Helm docs is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked changes have not been committed" && git --no-pager diff && exit 1; fi
lint:
runs-on: ubuntu-latest
steps:

View File

@@ -14,9 +14,6 @@ linters:
- wrapcheck
- gomnd
- scopelint
- golint
- interfacer
- maligned
- varnamelen
- testpackage
- tagliatelle
@@ -38,6 +35,14 @@ linters:
- gochecknoinits
- funlen
- dupl
- maintidx
- cyclop
# deprecated linters
- deadcode
- golint
- interfacer
- structcheck
- varcheck
- nosnakecase
- ifshort
- maligned
enable-all: true

View File

@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.0.1
VERSION ?= 0.1.1
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -36,7 +36,7 @@ IMAGE_TAG_BASE ?= clastix.io/operator
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# Image URL to use all building/pushing image targets
IMG ?= clastix/kamaji:latest
IMG ?= clastix/kamaji:v$(VERSION)
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -87,10 +87,18 @@ CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2)
GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call install-kustomize,$(KUSTOMIZE),3.8.7)
APIDOCS_GEN = $(shell pwd)/bin/crdoc
apidocs-gen: ## Download crdoc locally if necessary.
$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@latest)
##@ Development
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
@@ -101,6 +109,9 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
golint: golangci-lint ## Linting the code according to the styling guide.
$(GOLANGCI_LINT) run -c .golangci.yml
test:
go test ./... -coverprofile cover.out
@@ -245,3 +256,8 @@ env:
e2e: env load helm ginkgo ## Create a KinD cluster, install Kamaji on it and run the test suite.
$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
$(GINKGO) -v ./e2e
##@ Document
apidoc: apidocs-gen
$(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl

View File

@@ -20,39 +20,14 @@ Global hyper-scalers are leading the Managed Kubernetes space, while other cloud
**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Core Concepts](./docs/concepts.md) for a deeper understanding of principles behind Kamaji's design.
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate.
<p align="center">
<img src="assets/kamaji-light.png#gh-light-mode-only" />
</p>
<p align="center">
<img src="assets/kamaji-dark.png#gh-dark-mode-only" />
</p>
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
<p align="center">
<img src="assets/screenshot.png" />
</p>
![Architecture](docs/content/images/kamaji-light.png#gh-light-mode-only)
![Architecture](docs/content/images/kamaji-dark.png#gh-dark-mode-only)
## Getting started
Please refer to the [Getting Started guide](./docs/getting-started-with-kamaji.md) to deploy a minimal setup of Kamaji on KinD.
> This project is still in an early development stage, which means it's not ready for production: APIs, commands, flags, etc. are subject to change, but it also means your feedback can still help shape it. Please try it out and let us know what you like, dislike, what works, what doesn't, etc.
## Use cases
The Kamaji project was initially started to address real and common problems, such as minimizing the Total Cost of Ownership while running Kubernetes at scale. However, it opens up a much wider range of use cases.
Here are a few:
- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize cluster management, monitoring, and observability while leaving developers free to focus on applications, increasing productivity and reducing operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures, with the option of Bring Your Own Device (BYOD).
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespace-based isolation often still need access to cluster-wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** try out a new Kubernetes API, an experimental flag, or a new tool without impacting production operations. Kamaji lets you simulate such things in a safe and controlled environment.
- **Workloads Testing:** easily check the behaviour of your workloads across multiple versions of Kubernetes by deploying multiple Control Planes in a single cluster.
Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-started/) to deploy a minimal setup of Kamaji on KinD.
## Features
@@ -74,37 +49,16 @@ Here are a few:
- [ ] Custom Prometheus metrics for monitoring and alerting
- [x] `kine` integration for MySQL as datastore
- [x] `kine` integration for PostgreSQL as datastore
- [ ] Deeper `kubeadm` integration
- [ ] Pooling of multiple `etcd` datastores
- [x] Pool of multiple datastores
- [ ] Automatic assigning of Tenant Control Plane to a datastore
- [ ] Autoscaling of Tenant Control Plane pods
## Documentation
Please, check the project's [documentation](./docs/) for getting started with Kamaji.
Please, check the project's [documentation](https://kamaji.clastix.io/) for getting started with Kamaji.
## Contributions
Kamaji is Open Source with Apache 2 license and any contribution is welcome.
## Community
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named after the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS, and we expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters, as we build on [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants, keeping tenant namespaces isolated by policies. While that solution strikes the right balance between features and ease of use, there are cases where a tenant user requires access to the control plane, for example, to manage CRDs on their own. With Kamaji, you can grant cluster admin permissions to the tenant.
Q. Well, you convinced me. How can I give it a try?
A. You can get started with Kamaji on a laptop with [KinD](./docs/getting-started-with-kamaji.md) installed.
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.

View File

@@ -208,6 +208,8 @@ type KubernetesVersion struct {
// KubernetesDeploymentStatus defines the status for the Tenant Control Plane Deployment in the management cluster.
type KubernetesDeploymentStatus struct {
appsv1.DeploymentStatus `json:",inline"`
// Selector is the label selector used to group the Tenant Control Plane Pods used by the scale subresource.
Selector string `json:"selector"`
// The name of the Deployment for the given cluster.
Name string `json:"name"`
// The namespace which the Deployment for the given cluster is deployed.

View File

@@ -85,6 +85,21 @@ type ControlPlaneComponentsResources struct {
type DeploymentSpec struct {
// +kubebuilder:default=2
Replicas int32 `json:"replicas,omitempty"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// If specified, the Tenant Control Plane pod's tolerations.
// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// If specified, the Tenant Control Plane pod's scheduling constraints.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used.
// All topologySpreadConstraints are ANDed.
TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
// Resources defines the amount of memory and CPU to allocate to each component of the Control Plane
// (kube-apiserver, controller-manager, and scheduler).
Resources *ControlPlaneComponentsResources `json:"resources,omitempty"`
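For illustration, a hedged sketch of how these new scheduling fields could surface in a TenantControlPlane manifest (all values are invented; the field paths follow the spec above):

apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
spec:
  controlPlane:
    deployment:
      replicas: 2
      # Schedule the control plane pods only on dedicated nodes.
      nodeSelector:
        node-role.kubernetes.io/tenant-control-plane: ""
      tolerations:
        - key: dedicated
          operator: Equal
          value: tenant-control-plane
          effect: NoSchedule
      # Spread replicas across zones; with no labelSelector set,
      # Kamaji falls back to its own selector for this Tenant Control Plane.
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule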
@@ -110,12 +125,17 @@ type ServiceSpec struct {
}
// AddonSpec defines the spec for every addon.
type AddonSpec struct{}
type AddonSpec struct {
ImageOverrideTrait `json:",inline"`
}
type KubeProxySpec struct {
// Specify the image override of the kube-proxy to install in the Tenant Cluster.
// If not specified, the Kubernetes default one will be used, according to the specified version.
ImageOverride string `json:"imageOverride,omitempty"`
type ImageOverrideTrait struct {
// ImageRepository sets the container registry to pull images from.
// If not set, the default ImageRepository will be used instead.
ImageRepository string `json:"imageRepository,omitempty"`
// ImageTag allows specifying a tag for the image.
// In case this value is set, kubeadm does not automatically change the version of the above components during upgrades.
ImageTag string `json:"imageTag,omitempty"`
}
// KonnectivitySpec defines the spec for Konnectivity.
@@ -123,13 +143,13 @@ type KonnectivitySpec struct {
// Port of Konnectivity proxy server.
ProxyPort int32 `json:"proxyPort"`
// Version for Konnectivity server and agent.
// +kubebuilder:default=v0.0.31
// +kubebuilder:default=v0.0.32
Version string `json:"version,omitempty"`
// ServerImage defines the container image for Konnectivity's server.
// +kubebuilder:default=us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server
// +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
ServerImage string `json:"serverImage,omitempty"`
// AgentImage defines the container image for Konnectivity's agent.
// +kubebuilder:default=us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
// +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-agent
AgentImage string `json:"agentImage,omitempty"`
// Resources define the amount of CPU and memory to allocate to the Konnectivity server.
Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
@@ -137,9 +157,14 @@ type KonnectivitySpec struct {
// AddonsSpec defines the enabled addons and their features.
type AddonsSpec struct {
CoreDNS *AddonSpec `json:"coreDNS,omitempty"`
// Enables the DNS addon in the Tenant Cluster.
// The registry and the tag are configurable, the image is hard-coded to `coredns`.
CoreDNS *AddonSpec `json:"coreDNS,omitempty"`
// Enables the Konnectivity addon in the Tenant Cluster, required if the worker nodes are in a different network.
Konnectivity *KonnectivitySpec `json:"konnectivity,omitempty"`
KubeProxy *KubeProxySpec `json:"kubeProxy,omitempty"`
// Enables the kube-proxy addon in the Tenant Cluster.
// The registry and the tag are configurable, the image is hard-coded to `kube-proxy`.
KubeProxy *AddonSpec `json:"kubeProxy,omitempty"`
}
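A hedged example of what the reshaped addons stanza could look like in a TenantControlPlane spec (registry and tag values are illustrative):

addons:
  coreDNS:
    # ImageOverrideTrait fields; the image name itself stays `coredns`.
    imageRepository: registry.k8s.io/coredns
    imageTag: v1.8.6
  kubeProxy: {}
  konnectivity:
    proxyPort: 8132
    version: v0.0.32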
// TenantControlPlaneSpec defines the desired state of TenantControlPlane.
@@ -159,6 +184,7 @@ type TenantControlPlaneSpec struct {
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.controlPlane.deployment.replicas,statuspath=.status.kubernetesResources.deployment.replicas,selectorpath=.status.kubernetesResources.deployment.selector
// +kubebuilder:resource:shortName=tcp
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.kubernetes.version",description="Kubernetes version"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.kubernetesResources.version.status",description="Kubernetes version"

View File

@@ -61,6 +61,7 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) {
*out = *in
out.ImageOverrideTrait = in.ImageOverrideTrait
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonSpec.
@@ -104,7 +105,7 @@ func (in *AddonsSpec) DeepCopyInto(out *AddonsSpec) {
}
if in.KubeProxy != nil {
in, out := &in.KubeProxy, &out.KubeProxy
*out = new(KubeProxySpec)
*out = new(AddonSpec)
**out = **in
}
}
@@ -520,6 +521,32 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
*out = make([]v1.TopologySpreadConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ControlPlaneComponentsResources)
@@ -592,6 +619,21 @@ func (in *ExternalKubernetesObjectStatus) DeepCopy() *ExternalKubernetesObjectSt
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageOverrideTrait) DeepCopyInto(out *ImageOverrideTrait) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageOverrideTrait.
func (in *ImageOverrideTrait) DeepCopy() *ImageOverrideTrait {
if in == nil {
return nil
}
out := new(ImageOverrideTrait)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
*out = *in
@@ -665,21 +707,6 @@ func (in *KonnectivityStatus) DeepCopy() *KonnectivityStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxySpec) DeepCopyInto(out *KubeProxySpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxySpec.
func (in *KubeProxySpec) DeepCopy() *KubeProxySpec {
if in == nil {
return nil
}
out := new(KubeProxySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeadmConfigStatus) DeepCopyInto(out *KubeadmConfigStatus) {
*out = *in

assets/kamaji-logo.svg Normal file
View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" role="img" viewBox="11.85 8.10 202.80 187.55"><title>Kamaji</title><path d="M32.1 13.7c-2.4.9-6.3 3.5-8.6 5.8-7.7 7.7-7.5 5-7.5 82.5 0 77.4-.2 74.8 7.5 82.5 7.7 7.8 4.2 7.5 90 7.5s82.3.3 90-7.5c7.7-7.7 7.5-5.1 7.5-82.5s.2-74.8-7.5-82.5c-7.8-7.8-4.1-7.5-90.4-7.4-66.7 0-77.2.3-81 1.6zm160.5 9.9c1.9.9 4.4 3.1 5.7 4.8l2.2 3.1v141l-2.2 3.1c-4.8 6.7-1.1 6.4-84.8 6.4s-80 .3-84.8-6.4l-2.2-3.1v-141l2.2-3.1c4.8-6.6.8-6.4 84.6-6.4 68 0 76.3.2 79.3 1.6z"/><path d="M90.1 33.7c-5.1 2.5-7.3 6.7-6.8 13.1.3 4.1 1 5.9 3.3 8.4s2.5 3 .9 2.3c-2-.7-25.1-4.6-29-4.9-1.1 0-2 .5-2 1.4 0 1.1-1.2 1.5-4.9 1.5-6.7 0-6.8 1.9-.4 4 8.2 2.7 9 3.4 3.3 3.5-5.3 0-8.2 1.1-7.1 2.8.7 1.2-2.7 2.2-8.1 2.2-7 0-6.5 2.4 1.1 5.1l3.9 1.4-2.9.5c-4.3.8-3.2 2.3 2.8 4.1l5.3 1.5-5.2 2.7c-8.2 4.2-8.3 5.8-.4 6.1 5.6.2 7.3 1.1 4.2 2.1-2.3.7-2.8 3.1-.9 3.7.7.3-.5 2-2.8 4-5.6 5.3-4 6.4 6.2 4.5 4.4-.8 8.1-1.3 8.3-1.2.2.2-1.3 2.4-3.3 4.8-2 2.4-3.6 4.7-3.6 5.2 0 .4 1.4.5 3 .3 2.9-.4 4 .5 2 1.7-.5.3-1 1.3-1 2.2 0 1.6 2.2 1.5 6.5-.3 1.7-.7 1.6-.2-.9 3-5.4 7.2.7 6.5 13.6-1.4 2.7-1.7 5.1-3 5.4-3 .3 0-.9 2.1-2.7 4.6-4.5 6.6-2.5 7.9 3.7 2.3 4.6-4.3 4.7-4.3 3-1.2-1.9 3.8-2.1 5.6-.4 5.1.6-.2 7.1-7.1 14.3-15.4 7.2-8.2 13.7-14.9 14.5-14.9.8 0 7.3 6.7 14.6 15 7.2 8.2 13.7 15.1 14.3 15.3 1.6.5 1.4-1.4-.5-5-1.6-3.2-1.6-3.2 3.2 1 6 5.1 7.8 4 3.5-2.2-1.8-2.5-3-4.6-2.7-4.6.3 0 2.7 1.3 5.4 3 12.9 7.9 19 8.6 13.6 1.4-2.5-3.2-2.6-3.7-.9-3 5.9 2.5 7.7 1.7 5.6-2.3-.9-1.5-.6-1.7 2-1.3 3.8.6 3.7-.5-.7-5.7-2-2.3-3.5-4.4-3.2-4.6.2-.2 2.1 0 4.3.4 13.9 3 16.4 1.8 9.8-4.3-2.1-1.9-3.2-3.6-2.5-3.6 2 0 1.4-2.8-.9-3.5-3.2-1-1.3-2 4.2-2.1 7.9-.2 7.8-1.9-.4-6.1l-5.2-2.7 5.4-1.6c6.4-1.8 7.9-4 2.9-4.1h-3.3l3.9-1.5c7.3-2.6 8.4-5.4 2.2-5.4-5.1 0-9.6-1.1-9-2.2 1.1-1.7-1.8-2.8-7.1-2.8-5.7-.1-4.9-.8 3.3-3.5 6.4-2.1 6.3-4-.4-4-3.7 0-4.9-.4-4.9-1.5 0-.9-.9-1.4-2-1.4-3.9.3-27 4.2-29 4.9-1.6.7-1.4.2.9-2.3 3.7-4 4.7-11.3 2.2-16.1-4.8-9.2-18.8-9.3-23.8 0-4.4 8.3.2 18.4 9.5 20.5 3 .6 2.8.8-5.5 4l-8.8 3.3-8.7-3.3c-8.1-3.2-8.4-3.4-5.5-4.1 1.7-.3 4.3-1.5 5.7-2.7 13.1-10.3.6-30.4-14.4-23.1zm77.6 98.4c-3.6 2.1-.8 7.7 3.2 6.4 2.1-.6 3.5-3.1 2.5-4.6-1.1-1.8-4-2.7-5.7-1.8zm8.3 3.9c0 1.9.5 2.1 6.3 1.8 4.7-.2 6.2-.7 6.2-1.8s-1.5-1.6-6.2-1.8c-5.8-.3-6.3-.1-6.3 1.8zm-135.6.3c-.2.7-.3 7.4-.2 14.8l.3 13.4 3.3.3c3.1.3 3.2.2 3.2-3.4 0-2.5.7-4.6 2.1-6l2.1-2.3 5 6c3.9 4.7 5.6 5.9 7.8 5.9 1.6 0 3.1-.3 3.3-.8.3-.4-2.1-4-5.4-8.1-3.2-4-5.9-7.6-5.9-8 0-.4 2.5-3.1 5.5-6.1 3-3 5.5-5.8 5.5-6.2 0-.4-1.5-.8-3.3-.8-2.8 0-4.4 1-9.6 6.5-3.5 3.6-6.5 6.5-6.7 6.5-.2 0-.4-2.9-.4-6.5V135h-3c-1.7 0-3.3.6-3.6 1.3zm31.2 7c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 3.3 2.2zm13.4-4l.3 11.3h6l.5-7.8c.5-7.6 1.5-9.6 4.7-9.7 3 0 4.3 3.2 4.3 10.6v7.4h3c3 0 3 0 3-5.9 0-7.3 1.2-10.7 4.1-11.6 3.8-1.3 5.9 2.5 5.9 10.6v6.9h6v-9c0-8.3-.2-9.3-2.5-11.5-2.9-3-9.8-3.5-12.7-.8-1.7 1.5-1.9 1.5-3.6 0-2.2-2-9.2-2.3-11.1-.5-1.1 1-1.4 1-1.8 0-.3-.6-1.8-1.2-3.4-1.2h-3l.3 11.2zm45.4-9.9c-1.1.8-1.5 1.9-1 3 .5 1.4 1.3 1.6 4 1.1 4.2-.8 8.4.2 8.4 2 0 .8-1.8 1.5-5.1 1.9-6 .7-8.9 2.9-8.9 6.6 0 3.2.8 4.4 3.7 6 2.9 1.5 5.2 1.4 8.6-.3 2.3-1.3 2.7-1.3 2.7 0 0 .9 1.1 1.4 3 1.4h3v-8.6c0-8.1-.1-8.7-2.9-11.5-2.5-2.5-3.7-2.9-8.3-2.9-3 0-6.2.6-7.2 1.3zm11.2 13.9c-.2 1.7-1.1 2.4-3.2 2.6-3.3.4-5.1-1-4.3-3.2.4-1.1 1.9-1.6 4.2-1.6 3.2 0 3.6.3 
3.3 2.2zm13-2.5c-.3 12.8-.3 12.8-2.7 12.8-1.5 0-2.7.8-3.1 2-2 5.4 9.4 4.3 11.9-1.2.6-1.3 1.1-7.7 1.1-14.3v-12h-6.9l-.3 12.7zm13.4-1.5l.3 11.3h6v-22l-3.3-.3-3.3-.3.3 11.3z"/></svg>


View File

@@ -1,37 +1,26 @@
apiVersion: v2
name: kamaji
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.1.0
appVersion: v0.1.1
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
home: https://github.com/clastix/kamaji
sources: ["https://github.com/clastix/kamaji"]
kubeVersion: ">=1.18"
icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
kubeVersion: ">=1.21.0-0"
maintainers:
- email: iam@mendrugory.com
name: Gonzalo Gabriel Jiménez Fuentes
- email: dario@tranchitella.eu
name: Dario Tranchitella
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
- email: iam@mendrugory.com
name: Gonzalo Gabriel Jiménez Fuentes
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.10.2
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/release-name: kamaji
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service

View File

@@ -1,6 +1,6 @@
# kamaji
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square)
![Version: 0.10.2](https://img.shields.io/badge/Version-0.10.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.1.1](https://img.shields.io/badge/AppVersion-v0.1.1-informational?style=flat-square)
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
@@ -8,10 +8,10 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
| Name | Email | Url |
| ---- | ------ | --- |
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
| Gonzalo Gabriel Jiménez Fuentes | <iam@mendrugory.com> | |
## Source Code
@@ -19,7 +19,7 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
## Requirements
Kubernetes: `>=1.18`
Kubernetes: `>=1.21.0-0`
[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
Starting from v0.1.1, this Helm Chart provides the installation of an internal `etcd` in order to streamline local testing. If you'd like to use an externally managed etcd instance, you can specify the overrides and set the value `etcd.deploy=false`.
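For example, a minimal values override for an externally managed etcd could look like this (hostnames are illustrative):

etcd:
  deploy: false
  overrides:
    endpoints:
      etcd-0: etcd-0.external.example.com
      etcd-1: etcd-1.external.example.com
      etcd-2: etcd-2.external.example.com

applied with something like `helm upgrade --install kamaji clastix/kamaji -n kamaji-system -f my-values.yaml`.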
@@ -97,7 +97,7 @@ Here the values you can override:
| etcd.overrides.caSecret.namespace | string | `"kamaji-system"` | Namespace of the secret which contains CA's certificate and private key. (default: "kamaji-system") |
| etcd.overrides.clientSecret.name | string | `"root-client-certs"` | Name of the secret which contains ETCD client certificates. (default: "root-client-certs") |
| etcd.overrides.clientSecret.namespace | string | `"kamaji-system"` | Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system") |
| etcd.overrides.endpoints | object | `{"etcd-0":"https://etcd-0.etcd.kamaji-system.svc.cluster.local","etcd-1":"https://etcd-1.etcd.kamaji-system.svc.cluster.local","etcd-2":"https://etcd-2.etcd.kamaji-system.svc.cluster.local"}` | (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define any port, inflected from .etcd.peerApiPort value. |
| etcd.overrides.endpoints | object | `{"etcd-0":"etcd-0.etcd.kamaji-system.svc.cluster.local","etcd-1":"etcd-1.etcd.kamaji-system.svc.cluster.local","etcd-2":"etcd-2.etcd.kamaji-system.svc.cluster.local"}` | (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define the protocol (TLS is automatically inflected), or any port, inflected from .etcd.peerApiPort value. |
| etcd.peerApiPort | int | `2380` | The peer API port which servers are listening to. |
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| etcd.persistence.size | string | `"10Gi"` | |
@@ -110,7 +110,7 @@ Here the values you can override:
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
| image.pullPolicy | string | `"Always"` | |
| image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
| image.tag | string | `"latest"` | |
| image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | |
| livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"healthcheck"},"initialDelaySeconds":15,"periodSeconds":20}` | The livenessProbe for the controller container |
| loggingDevel.enable | bool | `false` | (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |

View File

@@ -0,0 +1,30 @@
# Kamaji - Managed Kubernetes Service
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
Useful links:
- [Kamaji Github repository](https://github.com/clastix/kamaji)
- [Kamaji Documentation](https://github.com/clastix/kamaji/docs/)
## Requirements
* Kubernetes v1.22+
* Helm v3
# Installation
To install the Chart with the release name `kamaji`:
helm upgrade --install kamaji clastix/kamaji --namespace kamaji-system --create-namespace
Show the status:
helm status kamaji -n kamaji-system
Upgrade the Chart:
helm upgrade kamaji -n kamaji-system clastix/kamaji
Uninstall the Chart:
helm uninstall kamaji -n kamaji-system
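If the `clastix` Helm repository is not configured yet, it presumably needs to be added first:

helm repo add clastix https://clastix.github.io/charts
helm repo update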

File diff suppressed because it is too large

View File

@@ -6,6 +6,10 @@ metadata:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.csrConfigMapName" . }}
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
data:
ca-csr.json: |-
{

View File

@@ -18,35 +18,13 @@ spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: Never
initContainers:
- name: cfssl
image: cfssl/cfssl:latest
command:
- bash
- -c
- |-
cfssl gencert -initca /csr/ca-csr.json | cfssljson -bare /certs/ca &&
mv /certs/ca.pem /certs/ca.crt && mv /certs/ca-key.pem /certs/ca.key &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/peer-csr.json | cfssljson -bare /certs/peer &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/server-csr.json | cfssljson -bare /certs/server &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=client-authentication /csr/root-client-csr.json | cfssljson -bare /certs/root-client
volumeMounts:
- mountPath: /certs
name: certs
- mountPath: /csr
name: csr
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command:
- sh
- -c
- |-
kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem &&
kubectl --namespace={{ .Release.Namespace }} rollout status sts/etcd --timeout=300s
volumeMounts:
- mountPath: /certs
name: certs
containers:
- command:
- bash
@@ -82,10 +60,7 @@ spec:
- name: root-certs
secret:
secretName: {{ include "etcd.clientSecretName" . }}
optional: true
- name: csr
configMap:
name: {{ include "etcd.csrConfigMapName" . }}
- name: certs
emptyDir: {}
secret:
secretName: {{ include "etcd.caSecretName" . }}
{{- end }}

View File

@@ -0,0 +1,60 @@
{{- if .Values.etcd.deploy }}
apiVersion: batch/v1
kind: Job
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": "hook-succeeded"
name: "{{ .Release.Name }}-etcd-certs"
namespace: {{ .Release.Namespace }}
spec:
template:
metadata:
name: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: Never
initContainers:
- name: cfssl
image: cfssl/cfssl:latest
command:
- bash
- -c
- |-
cfssl gencert -initca /csr/ca-csr.json | cfssljson -bare /certs/ca &&
mv /certs/ca.pem /certs/ca.crt && mv /certs/ca-key.pem /certs/ca.key &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/peer-csr.json | cfssljson -bare /certs/peer &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/server-csr.json | cfssljson -bare /certs/server &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=client-authentication /csr/root-client-csr.json | cfssljson -bare /certs/root-client
volumeMounts:
- mountPath: /certs
name: certs
- mountPath: /csr
name: csr
containers:
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command:
- sh
- -c
- |-
kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
volumeMounts:
- mountPath: /certs
name: certs
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
volumes:
- name: csr
configMap:
name: {{ include "etcd.csrConfigMapName" . }}
- name: certs
emptyDir: {}
{{- end }}

View File

@@ -5,6 +5,9 @@ metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: etcd-gen-certs-role
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "-5"
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
@@ -38,6 +41,9 @@ metadata:
{{- include "etcd.labels" . | nindent 4 }}
name: etcd-gen-certs-rolebiding
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "-5"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role

View File

@@ -5,5 +5,8 @@ metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.serviceAccountName" . }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "-5"
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -9,8 +9,8 @@ image:
# -- The container image of the Kamaji controller.
repository: clastix/kamaji
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: latest
# -- Overrides the image tag whose default is the chart appVersion.
tag:
# -- A list of extra arguments to add to the kamaji controller default ones
extraArgs: []
@@ -67,11 +67,11 @@ etcd:
name: root-client-certs
# -- Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system")
namespace: kamaji-system
# -- (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define any port, inflected from .etcd.peerApiPort value.
# -- (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define the protocol (TLS is automatically inflected), or any port, inflected from .etcd.peerApiPort value.
endpoints:
etcd-0: https://etcd-0.etcd.kamaji-system.svc.cluster.local
etcd-1: https://etcd-1.etcd.kamaji-system.svc.cluster.local
etcd-2: https://etcd-2.etcd.kamaji-system.svc.cluster.local
etcd-0: etcd-0.etcd.kamaji-system.svc.cluster.local
etcd-1: etcd-1.etcd.kamaji-system.svc.cluster.local
etcd-2: etcd-2.etcd.kamaji-system.svc.cluster.local
# -- ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled))
compactionInterval: 0

File diff suppressed because it is too large

View File

@@ -3,6 +3,7 @@
# It should be run by config/default
resources:
- bases/kamaji.clastix.io_tenantcontrolplanes.yaml
- bases/kamaji.clastix.io_datastores.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:

View File

@@ -25,3 +25,4 @@ spec:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"
- "--datastore=kamaji-etcd"

View File

@@ -7,6 +7,234 @@ metadata:
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
name: datastores.kamaji.clastix.io
spec:
group: kamaji.clastix.io
names:
kind: DataStore
listKind: DataStoreList
plural: datastores
singular: datastore
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Kamaji data store driver
jsonPath: .spec.driver
name: Driver
type: string
- description: Age
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: DataStore is the Schema for the datastores API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DataStoreSpec defines the desired state of DataStore.
properties:
basicAuth:
description: In case of authentication enabled for the given data store, specifies the username and password pair. This value is optional.
properties:
password:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
username:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
required:
- password
- username
type: object
driver:
description: The driver to use to connect to the shared datastore.
type: string
endpoints:
description: List of the endpoints to connect to the shared datastore. No need for protocol, just bare IP/FQDN and port.
items:
type: string
type: array
tlsConfig:
description: Defines the TLS/SSL configuration required to connect to the data store in a secure way.
properties:
certificateAuthority:
description: Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference. The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
properties:
certificate:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
privateKey:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
required:
- certificate
type: object
clientCertificate:
description: Specifies the SSL/TLS key and private key pair used to connect to the data store.
properties:
certificate:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
privateKey:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
type: string
name:
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret name must be unique.
type: string
required:
- keyPath
type: object
x-kubernetes-map-type: atomic
type: object
required:
- certificate
- privateKey
type: object
required:
- certificateAuthority
- clientCertificate
type: object
required:
- driver
- endpoints
- tlsConfig
type: object
status:
description: DataStoreStatus defines the observed state of DataStore.
properties:
usedBy:
description: List of the Tenant Control Planes, namespaced named, using this data store.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
@@ -64,13 +292,20 @@ spec:
description: Addons contain which addons are enabled
properties:
coreDNS:
description: AddonSpec defines the spec for every addon.
description: Enables the DNS addon in the Tenant Cluster. The registry and the tag are configurable, the image is hard-coded to `coredns`.
properties:
imageRepository:
description: ImageRepository sets the container registry to pull images from. If not set, the default ImageRepository will be used instead.
type: string
imageTag:
description: ImageTag allows specifying a tag for the image. In case this value is set, kubeadm does not automatically change the version of the above components during upgrades.
type: string
type: object
konnectivity:
description: KonnectivitySpec defines the spec for Konnectivity.
description: Enables the Konnectivity addon in the Tenant Cluster, required if the worker nodes are in a different network.
properties:
agentImage:
default: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
default: registry.k8s.io/kas-network-proxy/proxy-agent
description: AgentImage defines the container image for Konnectivity's agent.
type: string
proxyPort:
@@ -100,20 +335,24 @@ spec:
type: object
type: object
serverImage:
default: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server
default: registry.k8s.io/kas-network-proxy/proxy-server
description: ServerImage defines the container image for Konnectivity's server.
type: string
version:
default: v0.0.31
default: v0.0.32
description: Version for Konnectivity server and agent.
type: string
required:
- proxyPort
type: object
kubeProxy:
description: Enables the kube-proxy addon in the Tenant Cluster. The registry and the tag are configurable, the image is hard-coded to `kube-proxy`.
properties:
imageOverride:
description: Specify the image override of the kube-proxy to install in the Tenant Cluster. If not specified, the Kubernetes default one will be used, according to the specified version.
imageRepository:
description: ImageRepository sets the container registry to pull images from. If not set, the default ImageRepository will be used instead.
type: string
imageTag:
description: ImageTag allows specifying a tag for the image. In case this value is set, kubeadm does not automatically change the version of the above components during upgrades.
type: string
type: object
type: object
@@ -135,6 +374,478 @@ spec:
type: string
type: object
type: object
affinity:
description: 'If specified, the Tenant Control Plane pod''s scheduling constraints. More info: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/'
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
properties:
preference:
description: A node selector term, associated with the corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements by node's labels.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements by node's fields.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms. The terms are ORed.
items:
description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements by node's labels.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements by node's fields.
items:
description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
properties:
labelSelector:
description: A label query over a set of resources, in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
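Because this block mirrors the core/v1 pod affinity API, a short sketch conveys the intent; the node label and pod label below are assumptions, not Kamaji conventions:

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: node-role.kubernetes.io/infra   # hypothetical node label
              operator: Exists
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              kamaji.clastix.io/name: tenant-00    # hypothetical pod label
```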
extraArgs:
description: ExtraArgs allows adding additional arguments to the Control Plane components, such as kube-apiserver, controller-manager, and scheduler.
properties:
@@ -156,6 +867,11 @@ spec:
type: string
type: array
type: object
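As a sketch of the ExtraArgs shape, assuming the per-component keys are `apiServer`, `controllerManager`, and `scheduler` (the hunk above truncates them), each holding a plain list of flags:

```yaml
extraArgs:
  apiServer:
    - --enable-admission-plugins=NodeRestriction   # illustrative flag
  controllerManager: []
  scheduler: []
```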
nodeSelector:
additionalProperties:
type: string
description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/'
type: object
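A minimal nodeSelector sketch, using a hypothetical dedicated node-pool label:

```yaml
nodeSelector:
  kamaji.clastix.io/node-pool: control-planes   # hypothetical label
```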
replicas:
default: 2
format: int32
@@ -230,6 +946,97 @@ spec:
type: object
type: object
type: object
tolerations:
description: 'If specified, the Tenant Control Plane pod''s tolerations. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/'
items:
description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
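A matching tolerations sketch, again with an invented taint:

```yaml
tolerations:
  - key: dedicated                    # hypothetical taint key
    operator: Equal
    value: tenant-control-plane
    effect: NoSchedule
```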
topologySpreadConstraints:
description: TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used. All topologySpreadConstraints are ANDed.
items:
description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
properties:
labelSelector:
description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
matchLabelKeys:
description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
items:
type: string
type: array
x-kubernetes-list-type: atomic
maxSkew:
description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
format: int32
type: integer
minDomains:
description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
format: int32
type: integer
nodeAffinityPolicy:
description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
nodeTaintsPolicy:
description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
topologyKey:
description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
type: string
whenUnsatisfiable:
description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
type: string
required:
- maxSkew
- topologyKey
- whenUnsatisfiable
type: object
type: array
type: object
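Finally, a spread-constraint sketch; per the field description above, leaving labelSelector nil makes Kamaji fall back to its own selector for the given Tenant Control Plane:

```yaml
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: DoNotSchedule
    # labelSelector intentionally omitted: Kamaji's own selector applies
```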
ingress:
description: Defines the options for an optional Ingress that exposes the API Server of the Tenant Control Plane.
@@ -864,6 +1671,9 @@ spec:
description: Total number of non-terminated pods targeted by this deployment (their labels match the selector).
format: int32
type: integer
selector:
description: Selector is the label selector used to group the Tenant Control Plane Pods used by the scale subresource.
type: string
unavailableReplicas:
description: Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.
format: int32
@@ -875,6 +1685,7 @@ spec:
required:
- name
- namespace
- selector
type: object
ingress:
description: KubernetesIngressStatus defines the status for the Tenant Control Plane Ingress in the management cluster.
@@ -1093,6 +1904,10 @@ spec:
served: true
storage: true
subresources:
scale:
labelSelectorPath: .status.kubernetesResources.deployment.selector
specReplicasPath: .spec.controlPlane.deployment.replicas
statusReplicasPath: .status.kubernetesResources.deployment.replicas
status: {}
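With the scale subresource wired to `.spec.controlPlane.deployment.replicas`, standard tooling can resize a Tenant Control Plane; a usage sketch with a hypothetical resource name:

```sh
kubectl scale tenantcontrolplane tenant-00 --replicas=3
```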
---
apiVersion: v1
@@ -1388,9 +2203,10 @@ spec:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
- --datastore=kamaji-etcd
command:
- /manager
image: clastix/kamaji:latest
image: clastix/kamaji:v0.1.1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1428,9 +2244,9 @@ spec:
basicAuth: null
driver: etcd
endpoints:
- etcd-0.etcd.kamaji-system.svc:2379
- etcd-1.etcd.kamaji-system.svc:2379
- etcd-2.etcd.kamaji-system.svc:2379
- etcd-0.etcd.kamaji-system.svc.cluster.local:2379
- etcd-1.etcd.kamaji-system.svc.cluster.local:2379
- etcd-2.etcd.kamaji-system.svc.cluster.local:2379
tlsConfig:
certificateAuthority:
certificate:

View File

@@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: clastix/kamaji
newTag: latest
newTag: v0.1.1

View File

@@ -5,9 +5,9 @@ metadata:
spec:
driver: etcd
endpoints:
- etcd-0.etcd.kamaji-system.svc:2379
- etcd-1.etcd.kamaji-system.svc:2379
- etcd-2.etcd.kamaji-system.svc:2379
- etcd-0.etcd.kamaji-system.svc.cluster.local:2379
- etcd-1.etcd.kamaji-system.svc.cluster.local:2379
- etcd-2.etcd.kamaji-system.svc.cluster.local:2379
basicAuth: null
tlsConfig:
certificateAuthority:

View File

@@ -52,14 +52,14 @@ func GetDeletableResources(config GroupDeleteableResourceBuilderConfiguration) [
func getDefaultResources(config GroupResourceBuilderConfiguration) []resources.Resource {
resources := append(getUpgradeResources(config.client), getKubernetesServiceResources(config.client)...)
resources = append(resources, getKubeadmConfigResources(config.client, getTmpDirectory(config.tcpReconcilerConfig.TmpBaseDirectory, config.tenantControlPlane), config.DataStore)...)
resources = append(resources, getKubernetesCertificatesResources(config.client, config.log, config.tcpReconcilerConfig, config.tenantControlPlane)...)
resources = append(resources, getKubeconfigResources(config.client, config.log, config.tcpReconcilerConfig, config.tenantControlPlane)...)
resources = append(resources, getKubernetesCertificatesResources(config.client, config.tcpReconcilerConfig, config.tenantControlPlane)...)
resources = append(resources, getKubeconfigResources(config.client, config.tcpReconcilerConfig, config.tenantControlPlane)...)
resources = append(resources, getKubernetesStorageResources(config.client, config.Connection, config.DataStore)...)
resources = append(resources, getInternalKonnectivityResources(config.client, config.log)...)
resources = append(resources, getInternalKonnectivityResources(config.client)...)
resources = append(resources, getKubernetesDeploymentResources(config.client, config.tcpReconcilerConfig, config.DataStore)...)
resources = append(resources, getKubernetesIngressResources(config.client)...)
resources = append(resources, getKubeadmPhaseResources(config.client, config.log)...)
resources = append(resources, getKubeadmAddonResources(config.client, config.log)...)
resources = append(resources, getKubeadmPhaseResources(config.client)...)
resources = append(resources, getKubeadmAddonResources(config.client)...)
resources = append(resources, getExternalKonnectivityResources(config.client)...)
return resources
@@ -109,61 +109,52 @@ func getKubeadmConfigResources(c client.Client, tmpDirectory string, dataStore k
}
}
func getKubernetesCertificatesResources(c client.Client, log logr.Logger, tcpReconcilerConfig TenantControlPlaneReconcilerConfig, tenantControlPlane kamajiv1alpha1.TenantControlPlane) []resources.Resource {
func getKubernetesCertificatesResources(c client.Client, tcpReconcilerConfig TenantControlPlaneReconcilerConfig, tenantControlPlane kamajiv1alpha1.TenantControlPlane) []resources.Resource {
return []resources.Resource{
&resources.CACertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.FrontProxyCACertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.SACertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.APIServerCertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.APIServerKubeletClientCertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.FrontProxyClientCertificate{
Client: c,
Log: log,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
}
}
func getKubeconfigResources(c client.Client, log logr.Logger, tcpReconcilerConfig TenantControlPlaneReconcilerConfig, tenantControlPlane kamajiv1alpha1.TenantControlPlane) []resources.Resource {
func getKubeconfigResources(c client.Client, tcpReconcilerConfig TenantControlPlaneReconcilerConfig, tenantControlPlane kamajiv1alpha1.TenantControlPlane) []resources.Resource {
return []resources.Resource{
&resources.KubeconfigResource{
Name: "admin-kubeconfig",
Client: c,
Log: log,
KubeConfigFileName: resources.AdminKubeConfigFileName,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.KubeconfigResource{
Name: "controller-manager-kubeconfig",
Client: c,
Log: log,
KubeConfigFileName: resources.ControllerManagerKubeConfigFileName,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
&resources.KubeconfigResource{
Name: "scheduler-kubeconfig",
Client: c,
Log: log,
KubeConfigFileName: resources.SchedulerKubeConfigFileName,
TmpDirectory: getTmpDirectory(tcpReconcilerConfig.TmpBaseDirectory, tenantControlPlane),
},
@@ -207,41 +198,36 @@ func getKubernetesIngressResources(c client.Client) []resources.Resource {
}
}
func getKubeadmPhaseResources(c client.Client, log logr.Logger) []resources.Resource {
func getKubeadmPhaseResources(c client.Client) []resources.Resource {
return []resources.Resource{
&resources.KubeadmPhase{
Name: "upload-config-kubeadm",
Client: c,
Log: log,
Phase: resources.PhaseUploadConfigKubeadm,
},
&resources.KubeadmPhase{
Name: "upload-config-kubelet",
Client: c,
Log: log,
Phase: resources.PhaseUploadConfigKubelet,
},
&resources.KubeadmPhase{
Name: "bootstrap-token",
Client: c,
Log: log,
Phase: resources.PhaseBootstrapToken,
},
}
}
func getKubeadmAddonResources(c client.Client, log logr.Logger) []resources.Resource {
func getKubeadmAddonResources(c client.Client) []resources.Resource {
return []resources.Resource{
&resources.KubeadmAddonResource{
Name: "coredns",
Client: c,
Log: log,
KubeadmAddon: resources.AddonCoreDNS,
},
&resources.KubeadmAddonResource{
Name: "kubeproxy",
Client: c,
Log: log,
KubeadmAddon: resources.AddonKubeProxy,
},
}
@@ -249,44 +235,19 @@ func getKubeadmAddonResources(c client.Client, log logr.Logger) []resources.Reso
func getExternalKonnectivityResources(c client.Client) []resources.Resource {
return []resources.Resource{
&konnectivity.ServiceAccountResource{
Client: c,
Name: "konnectivity-sa",
},
&konnectivity.ClusterRoleBindingResource{
Client: c,
Name: "konnectivity-clusterrolebinding",
},
&konnectivity.KubernetesDeploymentResource{
Client: c,
Name: "konnectivity-deployment",
},
&konnectivity.ServiceResource{
Client: c,
Name: "konnectivity-service",
},
&konnectivity.Agent{
Client: c,
Name: "konnectivity-agent",
},
&konnectivity.ServiceAccountResource{Client: c},
&konnectivity.ClusterRoleBindingResource{Client: c},
&konnectivity.KubernetesDeploymentResource{Client: c},
&konnectivity.ServiceResource{Client: c},
&konnectivity.Agent{Client: c},
}
}
func getInternalKonnectivityResources(c client.Client, log logr.Logger) []resources.Resource {
func getInternalKonnectivityResources(c client.Client) []resources.Resource {
return []resources.Resource{
&konnectivity.EgressSelectorConfigurationResource{
Client: c,
Name: "konnectivity-egress-selector-configuration",
},
&konnectivity.CertificateResource{
Client: c,
Log: log,
Name: "konnectivity-certificate",
},
&konnectivity.KubeconfigResource{
Client: c,
Name: "konnectivity-kubeconfig",
},
&konnectivity.EgressSelectorConfigurationResource{Client: c},
&konnectivity.CertificateResource{Client: c},
&konnectivity.KubeconfigResource{Client: c},
}
}

View File

@@ -1,16 +0,0 @@
include etcd/Makefile
deploy_path := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
.DEFAULT_GOAL := kamaji
.PHONY: etcd-cluster
reqs: etcd-cluster
.PHONY: kamaji
kamaji: reqs
@kubectl apply -f $(deploy_path)/../../config/install.yaml
.PHONY: destroy
destroy: etcd-certificates/cleanup
@kubectl delete -f $(deploy_path)/../../config/install.yaml

View File

@@ -1,26 +0,0 @@
# Deploy Kamaji
## Quickstart with KinD
```sh
make -C kind
```
## Multi-tenant etcd cluster
> This assumes you already have a running Kubernetes cluster and kubeconfig.
```sh
make -C etcd
```
## Multi-tenant cluster using Kine
`kine` is an `etcd` shim that allows using a different datastore.
Kamaji currently supports the following backends:
- [MySQL](kine/mysql/README.md)
- [PostgreSQL](kine/postgresql/README.md)
> This assumes you already have a running Kubernetes cluster and kubeconfig.
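As a sketch, a kine-backed DataStore differs from the etcd one mainly in the driver, endpoint, and credentials; every name below is an assumption, and the tlsConfig the CRD schema marks as required is elided for brevity:

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: postgresql-default                          # hypothetical name
spec:
  driver: PostgreSQL
  endpoints:
    - postgres-default-rw.kamaji-system.svc:5432    # hypothetical endpoint
  basicAuth:
    username:
      secretReference:
        name: postgres-credentials                  # hypothetical secret
        namespace: kamaji-system
        keyPath: username
    password:
      secretReference:
        name: postgres-credentials
        namespace: kamaji-system
        keyPath: password
  # tlsConfig: required by the CRD schema shown earlier, omitted here
```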

View File

@@ -1,680 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "vxlan"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-node
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- ipamconfigs
verbs:
- get
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
verbs:
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
verbs:
- list
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- clusterinformations
verbs:
- get
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
verbs:
- get
- create
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.20.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.20.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "10.36.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
#- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
#- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.20.0
resources:
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a PodDisruptionBudget for the controller to allow the K8s Cluster Autoscaler to evict it
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers

File diff suppressed because it is too large.


@@ -1,687 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-node
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- ipamconfigs
verbs:
- get
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
verbs:
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
verbs:
- list
- apiGroups:
- crd.projectcalico.org
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- crd.projectcalico.org
resources:
- clusterinformations
verbs:
- get
- create
- update
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
verbs:
- get
- create
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.20.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.20.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "can-reach=192.168.32.0"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.20.0
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
resources:
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a PodDisruptionBudget for the controller to allow the K8s Cluster Autoscaler to evict it
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---


@@ -54,13 +54,6 @@ spec:
app: etcd
spec:
serviceAccountName: etcd
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: etcd
volumes:
- name: certs
secret:


@@ -1,18 +1,21 @@
# azure parameters
export KAMAJI_REGION=westeurope
export KAMAJI_RG=Kamaji
# https://docs.microsoft.com/en-us/azure/aks/faq#why-are-two-resource-groups-created-with-aks
export KAMAJI_CLUSTER=kamaji
export KAMAJI_NODE_RG=MC_${KAMAJI_RG}_${KAMAJI_CLUSTER}_${KAMAJI_REGION}
export KAMAJI_CLUSTER=kamaji
export KAMAJI_VNET_NAME=kamaji-net
export KAMAJI_VNET_ADDRESS=10.224.0.0/12
export KAMAJI_SUBNET_NAME=kamaji-subnet
export KAMAJI_SUBNET_ADDRESS=10.224.0.0/16
# kamaji parameters
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=$KAMAJI_REGION.cloudapp.azure.com
export TENANT_VERSION=v1.23.5
export TENANT_VERSION=v1.25.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16
@@ -21,10 +24,8 @@ export TENANT_DNS_SERVICE=10.96.0.10
export TENANT_VM_SIZE=Standard_D2ds_v4
export TENANT_VM_IMAGE=UbuntuLTS
export TENANT_RG=$TENANT_NAME
export TENANT_NSG=$TENANT_NAME-nsg
export TENANT_VNET_NAME=$TENANT_NAME
export TENANT_VNET_ADDRESS=172.12.0.0/16
export TENANT_SUBNET_NAME=$TENANT_NAME-subnet
export TENANT_SUBNET_ADDRESS=172.12.10.0/24
export TENANT_VMSS=$TENANT_NAME-vmss
export TENANT_SUBNET_ADDRESS=10.225.0.0/16
export TENANT_VMSS=$TENANT_NAME-vmss


@@ -2,10 +2,10 @@
export KAMAJI_NAMESPACE=kamaji-system
# tenant cluster parameters
export TENANT_NAMESPACE=tenants
export TENANT_NAMESPACE=default
export TENANT_NAME=tenant-00
export TENANT_DOMAIN=clastix.labs
export TENANT_VERSION=v1.23.5
export TENANT_VERSION=v1.25.0
export TENANT_PORT=6443 # port used to expose the tenant api server
export TENANT_PROXY_PORT=8132 # port used to expose the konnectivity server
export TENANT_POD_CIDR=10.36.0.0/16


@@ -1,26 +0,0 @@
# Kine integration
[kine](https://github.com/k3s-io/kine) is an `etcd` shim that lets you use a different datastore for your Kubernetes cluster.
Kamaji allows running a shared datastore using a different MySQL or PostgreSQL schema per Tenant.
This can help overcome `etcd` limitations regarding scalability and cluster size, as well as with HA and replication.
## Kamaji additional CLI flags
Kamaji reads the datastore configuration from a cluster-scoped resource named `DataStore`, containing all the required details to secure a connection using a specific driver.
- [Example of a `etcd` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_etcd.yaml)
- [Example of a `MySQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_mysql.yaml)
- [Example of a `PostgreSQL` DataStore](./../../config/samples/kamaji_v1alpha1_datastore_postgresql.yaml)
Once the datastore is running, and the `DataStore` resource has been created with the required details, point Kamaji at it by using the following flag with the resource name:
```
--datastore={.metadata.name}
```
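As an illustration, here is a minimal sketch of wiring the flag into an already installed Kamaji Deployment; the Deployment name `kamaji` in the `kamaji-system` namespace and the DataStore name `default` are assumptions, adjust them to your installation:
```bash
# Hypothetical example: append the --datastore flag to the controller arguments,
# pointing at a DataStore resource named "default".
kubectl -n kamaji-system patch deployment kamaji --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--datastore=default"}]'
```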
## Drivers
Further details on the setup for each driver are available here:
- [MySQL/MariaDB](../deploy/kine/mysql/README.md)
- [PostgreSQL](../deploy/kine/postgresql/README.md)


@@ -15,6 +15,7 @@ mariadb-certificates:
chmod 644 $(ROOT_DIR)/certs/*
mariadb-secret:
@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
@kubectl -n kamaji-system create secret generic mysql-config \
--from-file=$(ROOT_DIR)/certs/ca.crt --from-file=$(ROOT_DIR)/certs/ca.key \
--from-file=$(ROOT_DIR)/certs/server.key --from-file=$(ROOT_DIR)/certs/server.crt \


@@ -1,74 +0,0 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine). One of the implementations is [MySQL](https://www.mysql.com/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, MySQL (or [MariaDB](https://mariadb.org/) in this case) will be deployed into the local Kubernetes cluster in order to be used as storage for the tenants.
There is a Makefile to help with the process:
# Setup
Setup of the MySQL/MariaDB backend can be performed with a single command.
```bash
$ make mariadb
```
This action will perform all the necessary steps to have MariaDB as the Kubernetes storage backend using kine.
```shell
rm -rf /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs && mkdir /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs
cfssl gencert -initca /home/prometherion/Documents/clastix/kamaji/deploy/mysql/ca-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca
2022/08/18 23:52:56 [INFO] generating a new CA key and certificate from CSR
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 310428005543054656774215122317606431230766314770
cfssl gencert -ca=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.crt -ca-key=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/ca.key \
-config=/home/prometherion/Documents/clastix/kamaji/deploy/mysql/config.json -profile=server \
/home/prometherion/Documents/clastix/kamaji/deploy/mysql/server-csr.json | cfssljson -bare /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/server
2022/08/18 23:52:56 [INFO] generate received request
2022/08/18 23:52:56 [INFO] received CSR
2022/08/18 23:52:56 [INFO] generating key: rsa-2048
2022/08/18 23:52:56 [INFO] encoded CSR
2022/08/18 23:52:56 [INFO] signed certificate with serial number 582698914718104852311252458344736030793138969927
chmod 644 /home/prometherion/Documents/clastix/kamaji/deploy/mysql/certs/*
secret/mysql-config created
secret/kine-secret created
serviceaccount/mariadb created
service/mariadb created
deployment.apps/mariadb created
persistentvolumeclaim/pvc-mariadb created
```
## Certificate creation
```bash
$ make mariadb-certificates
```
Communication between kine and the backend is encrypted; therefore, a CA and a certificate signed by it must be created.
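As an optional sanity check, you can verify that the server certificate is signed by the generated CA; the `deploy/mysql/certs` path is assumed from the Makefile output above:
```bash
# Confirm the server certificate chains back to the generated CA.
openssl verify -CAfile deploy/mysql/certs/ca.crt deploy/mysql/certs/server.crt
```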
## Secret Deployment
```bash
$ make mariadb-secret
```
The previously generated certificates and the MySQL configuration have to be made available in order to be used.
They are stored in the secret `kamaji-system:mysql-config`, used by the MySQL/MariaDB instance.
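As a quick check, you can list the keys stored in that secret; a small sketch using `jq`:
```bash
# Show which files ended up in the mysql-config secret.
kubectl -n kamaji-system get secret mysql-config -o json | jq '.data | keys'
```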
## Deployment
```bash
$ make mariadb-deployment
```
Finally, this starts the MySQL/MariaDB installation with all the required settings, such as the SSL connection and configuration.
# Cleanup
```bash
$ make mariadb-destroy
```


@@ -7,6 +7,7 @@ cnpg-setup:
cnpg-deploy:
@kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager
@kubectl create namespace kamaji-system --dry-run=client -o yaml | kubectl apply -f -
@kubectl -n kamaji-system apply -f postgresql.yaml
@while ! kubectl -n kamaji-system get secret postgresql-superuser > /dev/null 2>&1; do sleep 1; done
@@ -17,11 +18,10 @@ cnpg:
sh -s -- -b $(shell git rev-parse --show-toplevel)/bin
postgresql-secret: cnpg
@kubectl -n kamaji-system get secret postgres-root-cert > /dev/null 2>&1 || $(CNPG) certificate postgres-root-cert \
@kubectl -n kamaji-system get secret postgres-root-cert > /dev/null 2>&1 || $(CNPG) -n kamaji-system certificate postgres-root-cert \
--cnpg-cluster postgresql \
--cnpg-user $$(kubectl -n kamaji-system get secret postgresql-superuser -o jsonpath='{.data.username}' | base64 -d)
postgresql-destroy:
@kubectl delete -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.16.0.yaml --ignore-not-found && \
kubectl delete secret postgres-root-cert --ignore-not-found && \
kubectl delete secret kine-secret --ignore-not-found
kubectl -n kamaji-system delete secret postgres-root-cert --ignore-not-found


@@ -1,74 +0,0 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine).
One of the implementations is [PostgreSQL](https://www.postgresql.org/).
The Kamaji project is developed using [kind](https://kind.sigs.k8s.io); therefore, a PostgreSQL instance must be deployed in advance into the local Kubernetes cluster in order to be used as storage for the tenants.
For the sake of simplicity, the [cloudnative-pg](https://cloudnative-pg.io/) Operator will be used to ease its setup.
There is a Makefile to help with the process:
## Setup
```bash
$ make postgresql
```
This target will install the `cloudnative-pg` Operator, creating the PostgreSQL instance in the Kamaji Namespace, along with generating the required Secret resource for the kine integration.
This action is idempotent and doesn't overwrite values if they already exist.
```shell
namespace/cnpg-system unchanged
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/clusters.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/poolers.postgresql.cnpg.io configured
customresourcedefinition.apiextensions.k8s.io/scheduledbackups.postgresql.cnpg.io configured
serviceaccount/cnpg-manager unchanged
clusterrole.rbac.authorization.k8s.io/cnpg-manager configured
clusterrolebinding.rbac.authorization.k8s.io/cnpg-manager-rolebinding unchanged
configmap/cnpg-default-monitoring unchanged
service/cnpg-webhook-service unchanged
deployment.apps/cnpg-controller-manager unchanged
mutatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-mutating-webhook-configuration configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cnpg-validating-webhook-configuration configured
deployment "cnpg-controller-manager" successfully rolled out
cluster.postgresql.cnpg.io/postgresql unchanged
secret/postgres-root-cert created
```
## Operator setup
```bash
$ make cnpg-setup
```
This target will apply all the required manifests: the `cloudnative-pg` CRDs, the required RBAC, and the Deployment.
Release [v1.16.0](https://github.com/cloudnative-pg/cloudnative-pg/releases/tag/v1.16.0) has been tested successfully.
## SSL certificate Secret generation
```bash
$ make postgresql-secret
```
This target will download the `kubectl-cnpg` utility locally and use it to generate the SSL certificate required to secure the connection to the PostgreSQL instance, i.e. the certificate needed to connect to the `DataStore`.
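As a quick check, the generated secret should now be present in the Kamaji namespace:
```bash
kubectl -n kamaji-system get secret postgres-root-cert
```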
## Teardown
```bash
$ make postgresql-destroy
```
This will lead to the deletion of the `cloudnative-pg` Operator, along with any instances and related secrets.
This action is idempotent.


@@ -28,5 +28,5 @@ runcmd:
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
- sudo apt update
- sudo apt install -y kubelet=1.23.5-00 kubeadm=1.23.5-00 kubectl=1.23.5-00
- sudo apt-mark hold kubelet kubeadm kubectl
- sudo apt install -y kubelet=1.25.0-00 kubeadm=1.25.0-00 kubectl=1.25.0-00
- sudo apt-mark hold kubelet kubeadm kubectl containerd


@@ -1,13 +0,0 @@
# Kamaji documentation
- [Core Concepts](./concepts.md)
- [Architecture](./architecture.md)
- [Getting started](./getting-started-with-kamaji.md)
- Guides:
- [Deploy Kamaji](./kamaji-deployment-guide.md)
- [Deploy Kamaji on Azure](./kamaji-azure-deployment-guide.md)
- Deploy Kamaji on AWS
- Deploy Kamaji on GCP
- Deploy Kamaji on OpenStack
- [Reference](./reference.md)
- [Versioning](./versioning.md)


@@ -1 +0,0 @@
# Kamaji architecture


@@ -1,31 +0,0 @@
# Core Concepts
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` for short.
## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you might expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.
All the tenant clusters built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves.
## Save the state
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time. As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we deploy a multi-tenant `etcd` cluster storing the state of multiple tenant clusters.
With this solution, resiliency is guaranteed by the usual `etcd` mechanism, and the pod count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that we have to operate an external `etcd` cluster, in addition to the `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its own data. Also, there are size limits in `etcd`, defaulting to 2GB and configurable to a maximum of 8GB. We're solving this issue by pooling multiple `etcd` clusters together and sharding the Tenant Control Planes.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` to save the state of the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration.
## Requirements of design
These are requirements of design behind Kamaji:
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
- Communication between different _“tenant clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
Goals and scope may vary as the project evolves.

docs/content/concepts.md Normal file

@@ -0,0 +1,47 @@
# Concepts
Kamaji is a Kubernetes Operator. It turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_.
These are requirements of the design behind Kamaji:
- Communication between the _“admin cluster”_ and a _“tenant cluster”_ is unidirectional. The _“admin cluster”_ manages a _“tenant cluster”_, but a _“tenant cluster”_ has no awareness of the _“admin cluster”_.
- Communication between different _“tenant clusters”_ is not allowed.
- The worker nodes of a tenant should not run anything beyond the tenant's workloads.
Goals and scope may vary as the project evolves.
## Tenant Control Plane
What makes Kamaji special is that the Control Plane of a _“tenant cluster”_ is just one or more regular pods running in a namespace of the _“admin cluster”_ instead of a dedicated set of Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. The Tenant Control Plane components are packaged in the same way they run on bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as if they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
High Availability and rolling updates of the Tenant Control Plane pods are provided by a regular Deployment. Autoscaling based on metrics is available. A Service is used to expose the Tenant Control Plane outside of the _“admin cluster”_. The `LoadBalancer` service type is used; `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case.
Kamaji offers a [Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing a Tenant Control Plane. This *CRD* is called `TenantControlPlane`, or `tcp` for short.
All the _“tenant clusters”_ built with Kamaji are fully compliant CNCF Kubernetes clusters and are compatible with the standard Kubernetes toolchains everybody knows and loves. See [CNCF compliance](reference/conformance.md).
## Tenant worker nodes
And what about the tenant worker nodes? They are just _"worker nodes"_, i.e. regular virtual or bare metal machines, connecting to the API server of the Tenant Control Plane. Kamaji's goal is to manage the lifecycle of hundreds of these _“tenant clusters”_, not only one, so how do you add another tenant cluster to Kamaji? As you might expect, you just deploy a new Tenant Control Plane in one of the _“admin cluster”_ namespaces, and then join the tenant worker nodes to it.
We have Cluster API support on the roadmap, as well as a Terraform provider, so that you can create _“tenant clusters”_ in a declarative way.
## Datastores
Putting the Tenant Control Plane in a pod is the easiest part. We also have to make sure each tenant cluster saves its state to be able to store and retrieve data. A dedicated `etcd` cluster for each tenant cluster doesn't scale well for a managed service because `etcd` data persistence can be cumbersome at scale, raising the operational effort to mitigate it. So we have to find an alternative, keeping in mind our goal of a resilient and cost-optimized solution at the same time.
As we can deploy any Kubernetes cluster with an external `etcd` cluster, we explored this option for the tenant control planes. On the admin cluster, we can deploy a multi-tenant `etcd` datastore to save the state of multiple tenant clusters. Kamaji offers a Custom Resource Definition called `DataStore` to provide a declarative approach to managing Tenant datastores. With this solution, resiliency is guaranteed by the usual `etcd` mechanism, and the pod count remains under control, so it solves the main goals of resiliency and cost optimization. The trade-off here is that we have to operate an external datastore, in addition to the `etcd` of the _“admin cluster”_, and manage the access to be sure that each _“tenant cluster”_ uses only its own data.
### Other storage drivers
Kamaji offers the option of using a more capable datastore than `etcd` to save the state of multiple tenants' clusters. Thanks to the native [kine](https://github.com/k3s-io/kine) integration, you can run _MySQL_ or _PostgreSQL_ compatible databases as the datastore for _“tenant clusters”_.
### Pooling
By default, Kamaji expects to persist all the _“tenant clusters”_ data in a single datastore that could be backed by different drivers. However, you can pick a different datastore for a specific set of _“tenant clusters”_ that could have different resources assigned or a different tiering. Pooling multiple datastores is an option you can leverage for a very large set of _“tenant clusters”_, so you can distribute the load properly. As a future improvement, we have a _datastore scheduler_ feature on the roadmap, so that Kamaji itself can automatically assign a _“tenant cluster”_ to the best datastore in the pool.
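As a sketch of what pinning could look like, assuming a `dataStore` field in the `TenantControlPlane` spec (check the CRD shipped with your Kamaji version) and a hypothetical DataStore named `postgresql-bronze`:
```bash
# Hypothetical example: pin a Tenant Control Plane to a specific datastore in the pool.
kubectl -n ${TENANT_NAMESPACE} patch tcp ${TENANT_NAME} --type=merge \
  -p '{"spec":{"dataStore":"postgresql-bronze"}}'
```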
## Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as a sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.
This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes.
After worker nodes join the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker node traffic goes through these connections.
> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
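A minimal sketch of disabling it, assuming the addon is modelled under `spec.addons.konnectivity` in the `TenantControlPlane` resource:
```bash
# Hypothetical example: remove the konnectivity addon from an existing Tenant Control Plane.
kubectl -n ${TENANT_NAMESPACE} patch tcp ${TENANT_NAME} --type=merge \
  -p '{"spec":{"addons":{"konnectivity":null}}}'
```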


@@ -0,0 +1,22 @@
# Governance
This document lays out the guidelines under which the Kamaji project will be governed.
The goal is to make sure that the roles and responsibilities are well defined and clarify how decisions are made.
## Roles
In the context of Kamaji project, we consider the following roles:
* __Users__: everyone using Kamaji, typically willing to provide feedback by proposing features and/or filing issues.
* __Contributors__: everyone contributing code, documentation, examples, tests, and participating in feature proposals as well as design discussions.
* __Maintainers__: are responsible for engaging with and assisting contributors to iterate on their contributions until they reach acceptable quality. Maintainers can decide whether the contributions can be accepted into the project or rejected.
## Release Management
The release process will be governed by Maintainers.
## Roadmap Planning
Maintainers will share roadmap and release versions as milestones in GitHub [project's page](https://github.com/clastix/kamaji).


@@ -0,0 +1,83 @@
# General
Thank you for your interest in contributing to Kamaji. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
## Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
1. You are working against the latest source on the *master* branch.
1. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
1. You open an issue to discuss any significant work: we would hate for your time to be wasted.
To send us a pull request, please:
1. Fork the repository.
1. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it
will be hard for us to focus on your change.
1. Ensure local tests pass.
1. Commit to your fork using clear commit messages.
1. Send us a pull request, answering any default questions in the pull request interface.
1. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
Make sure to keep Pull Requests small and functional to make them easier to review, understand, and look up in commit history. This repository uses "Squash and Commit" to keep our history clean and make it easier to revert changes based on a PR. Adding the appropriate documentation, unit tests, and e2e tests as part of a feature is the responsibility of the feature owner, whether it is done in the same Pull Request or not. All Pull Requests must refer to an already open issue: this is also the first step of contributing, as it informs maintainers about the issue.
## Commits
A commit's first line should not exceed 50 columns.
A commit description is welcome to further explain the changes: just ensure to put a blank line after the subject, followed by an arbitrary number of lines of at most 72 characters each, with at most one blank line between them.
Please split changes into several small, documented commits: this will help us perform a better review. Commits must follow the Conventional Commits Specification, a lightweight convention on top of commit messages. It provides an easy set of rules for creating an explicit commit history, which makes it easier to write automated tools on top of it. This convention dovetails with Semantic Versioning by describing the features, fixes, and breaking changes made in commit messages. See the [Conventional Commits Specification](https://www.conventionalcommits.org) to learn more.
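An illustrative commit message following the convention (scope and wording are made up):
```
feat(datastore): support pooling of multiple datastores

Allow a TenantControlPlane to be assigned to a dedicated DataStore
so that the load can be distributed across the pool.
```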
> In case of errors or a need for changes to previous commits, fix them by squashing, to keep changes atomic.
## Code convention
Kamaji is written in Golang. Changes must follow the Pull Request method, where a _GitHub Action_ runs `golangci-lint`, so ensure your changes respect the coding standard.
### golint
You can easily check this by issuing the _Make_ recipe `golint`.
```
# make golint
golangci-lint run -c .golangci.yml
```
> Enabled linters and related options are defined in the [.golangci.yml file](https://github.com/clastix/Kamaji/blob/master/.golangci.yml)
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the
default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), any 'help wanted'
and 'good first issue' issues are a great place to start.
## Design Docs
A contributor proposes a design with a PR on the repository to allow for revisions and discussions. If a design needs to be discussed before formulating a document for it, make use of GitHub Discussions to involve the community in the discussion.
## GitHub Issues
GitHub Issues are used to file bugs, work items, and feature requests with actionable items/issues.
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
* The version of the code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment
## Miscellanea
Please add a single trailing newline at the end of every file, as per the current coding style.
## Licensing
See the [LICENSE](https://github.com/clastix/Kamaji/blob/master/LICENSE) file for our project's licensing. We may ask you to confirm the licensing of your contribution.


@@ -0,0 +1,2 @@
# Guidelines
Guidelines for community contributions.


@@ -1,19 +1,22 @@
# Setup a minimal Kamaji for development
# Getting started
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development purposes. Please refer to the [Kamaji documentation](../README.md) for understanding all the terms used in this guide, for example: `admin cluster` and `tenant control plane`.
This document explains how to deploy a minimal Kamaji setup on [KinD](https://kind.sigs.k8s.io/) for development purposes. Please refer to the [Kamaji documentation](concepts.md) for understanding all the terms used in this guide, for example: `admin cluster`, `tenant cluster`, and `tenant control plane`.
## Pre-requisites
We assume you have installed on your workstation:
- [Docker](https://docs.docker.com/engine/install/)
- [Docker](https://docker.com)
- [KinD](https://kind.sigs.k8s.io/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)
- [kubectl@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm@v1.25.0](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [cfssl](https://github.com/cloudflare/cfssl)
- [cfssljson](https://github.com/cloudflare/cfssl)
- [cfssl/cfssljson](https://github.com/cloudflare/cfssl)
> Starting from Kamaji v0.0.2, `kubectl` and `kubeadm` need to be at least version `v1.25.0`:
> this is required due to changes introduced in the Kubernetes 1.25 release regarding the `kubelet-config` ConfigMap required for the node join.
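You can verify the installed versions meet the requirement with:
```bash
kubectl version --client --short
kubeadm version -o short
```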
## Setup Kamaji on KinD
@@ -21,9 +24,9 @@ The instance of Kamaji is made of a single node hosting:
- admin control-plane
- admin worker
- multi-tenant etcd cluster
- multi-tenant datastore
### Standard
### Standard installation
You can install your KinD cluster, multi-tenant `etcd` cluster, and Kamaji operator with a **single command**:
@@ -35,29 +38,55 @@ Now you can [create your first `TenantControlPlane`](#deploy-tenant-control-plan
### Data store-specific
#### ETCD
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like `MySQL` or `PostgreSQL` compatible databases.
The multi-tenant `etcd` cluster is deployed as a StatefulSet into the Kamaji node.
Run `make reqs` to setup Kamaji's requisites on KinD:
First, setup a KinD cluster:
```bash
$ make -C deploy/kind reqs
$ make -C deploy/kind kind
```
At this moment you will have your KinD cluster up and running, with the `etcd` cluster in multi-tenant mode.
#### ETCD
Deploy a multi-tenant `ETCD` cluster into the Kamaji node:
```bash
$ make -C deploy/kind etcd-cluster
```
Now you're ready to [install Kamaji operator](#install-kamaji).
#### Kine
#### MySQL
> The MySQL-compatible cluster provisioning is omitted here.
Deploy a MySQL/MariaDB backend into the Kamaji node:
Kamaji offers the possibility of using a different storage system than `ETCD` for the tenants, like MySQL or PostgreSQL compatible databases.
```bash
$ make -C deploy/kine/mysql mariadb
```
Read more about it in the provided [guide](../deploy/kine/README.md).
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a MySQL DataStore `config/samples/kamaji_v1alpha1_datastore_mysql.yaml` and make sure Kamaji uses the proper datastore name:
Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect to kine and a compatible database using the proper driver, you can now install it.
```
--datastore={.metadata.name}
```
Now you're ready to [install Kamaji operator](#install-kamaji).
#### PostgreSQL
Deploy a PostgreSQL backend into the Kamaji node:
```bash
$ make -C deploy/kine/postgresql postgresql
```
Adjust the Kamaji install manifest `config/install.yaml` according to the example of a PostgreSQL DataStore `config/samples/kamaji_v1alpha1_datastore_postgresql.yaml` and make sure Kamaji uses the proper datastore name:
```
--datastore={.metadata.name}
```
Now you're ready to [install Kamaji operator](#install-kamaji).
### Install Kamaji
@@ -65,6 +94,8 @@ Assuming you adjusted the [Kamaji manifest](../config/install.yaml) to connect t
$ kubectl apply -f config/install.yaml
```
> If you experience errors during the apply of the manifest, such as `resource mapping not found ... ensure CRDs are installed first`, just apply it again.
### Deploy Tenant Control Plane
Now it is time to deploy your first tenant control plane.
@@ -147,7 +178,7 @@ $ kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/master/insta
### Join worker nodes
```bash
$ make kamaji-kind-worker-join
$ make -C deploy/kind kamaji-kind-worker-join
```
> To add more worker nodes, run the command above again.
@@ -160,12 +191,12 @@ NAME STATUS ROLES AGE VERSION
d2d4b468c9de Ready <none> 44s v1.23.4
```
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`
> For more complex scenarios (exposing port, different version and so on), run `join-node.bash`.
The tenant control plane provisioning is now complete in a minimal Kamaji setup based on KinD. You can now develop, test, and run your own experiments with Kamaji.
## Cleanup
```bash
$ make destroy
$ make -C deploy/kind destroy
```


@@ -0,0 +1,3 @@
# How to Guides
This section of the Kamaji documentation contains pages that show how to do a specific thing, typically by giving a sequence of steps.


@@ -1,12 +1,19 @@
# Setup Kamaji on Azure
This guide will lead you through the process of creating a working Kamaji setup on MS Azure. It requires:
This guide will lead you through the process of creating a working Kamaji setup on MS Azure.
- one bootstrap local workstation
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved, you are encouraged to build suitable, auditable GitOps deployment processes around your final infrastructure.
The guide requires:
- one bootstrap workstation
- an AKS Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of Azure virtual machines to host `Tenant`s' workloads
## Summary
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -21,10 +28,10 @@ cd kamaji/deploy
We assume you have installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
Make sure you have a valid Azure subscription, and login to Azure:
@@ -47,20 +54,37 @@ az group create \
--name $KAMAJI_RG \
--location $KAMAJI_REGION
az network vnet create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_VNET_NAME \
--location $KAMAJI_REGION \
--address-prefix $KAMAJI_VNET_ADDRESS
az network vnet subnet create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_SUBNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--address-prefixes $KAMAJI_SUBNET_ADDRESS
KAMAJI_SUBNET_ID=$(az network vnet subnet show \
--resource-group ${KAMAJI_RG} \
--vnet-name ${KAMAJI_VNET_NAME} \
--name ${KAMAJI_SUBNET_NAME} \
--query id --output tsv)
az aks create \
--resource-group $KAMAJI_RG \
--name $KAMAJI_CLUSTER \
--location $KAMAJI_REGION \
--vnet-subnet-id $KAMAJI_SUBNET_ID \
--zones 1 2 3 \
--node-count 3 \
--nodepool-name $KAMAJI_CLUSTER \
--ssh-key-value @~/.ssh/id_rsa.pub \
--no-wait
--nodepool-name $KAMAJI_CLUSTER
```
Once the cluster formation succeeds, get credentials to access the cluster as admin:
```
```bash
az aks get-credentials \
--resource-group $KAMAJI_RG \
--name $KAMAJI_CLUSTER
@@ -68,41 +92,41 @@ az aks get-credentials \
And check you can access:
```
```bash
kubectl cluster-info
```
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save data of the tenants' clusters.
Install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence.
The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`.
However, an externally managed `etcd` is highly recommended.
If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL or PostgreSQL compatible databases, thanks to the [kine](https://github.com/k3s-io/kine) integration documented [here](../deploy/kine/README.md).
### Install with Helm Chart
Install with the `helm` in a dedicated namespace of the Admin cluster:
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a three-replica StatefulSet with data persistence:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```
The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as MySQL or PostgreSQL compatible databases, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as datastore:
```bash
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 120m
etcd-1 1/1 Running 0 120m
etcd-2 1/1 Running 0 119m
kamaji-857fcdf599-4fb2p 2/2 Running 0 120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
You just turned your AKS cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:
```
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Azure Kubernetes AKS cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.
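As a quick sanity check, verify the Kamaji controller pod is up; pod names may differ depending on your chart values:
```bash
kubectl -n kamaji-system get pods
```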
## Create Tenant Cluster
@@ -135,17 +159,17 @@ spec:
resources:
apiServer:
requests:
cpu: 500m
cpu: 250m
memory: 512Mi
limits: {}
controllerManager:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
scheduler:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
service:
@@ -198,31 +222,30 @@ spec:
type: LoadBalancer
EOF
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
Make sure:
- the annotation `service.beta.kubernetes.io/azure-load-balancer-internal=true` is set on the `tcp` service. It tells Azure to expose the service through an internal loadbalancer.
- the annotation `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
- the annotation `service.beta.kubernetes.io/azure-dns-label-name=${TENANT_NAME}` is set on the public loadbalancer service. It tells Azure to expose the Tenant Control Plane with public domain name: `${TENANT_NAME}.${TENANT_DOMAIN}`.
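You can inspect the annotations actually applied; here the service name is assumed to match the Tenant Control Plane name:
```bash
# Show the annotations on the tcp service.
kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq '.metadata.annotations'
```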
### Working with Tenant Control Plane
Check the access to the Tenant Control Plane:
```
```bash
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/healthz
curl -k https://${TENANT_NAME}.${KAMAJI_REGION}.cloudapp.azure.com/version
```
Let's retrieve the `kubeconfig` in order to work with it:
```
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d \
| base64 --decode \
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig config \
@@ -248,118 +271,65 @@ NAME ENDPOINTS AGE
kubernetes 10.240.0.100:6443 57m
```
### Prepare the Infrastructure for the Tenant virtual machines
Kamaji provides Control Plane as a Service, so the tenant user can join their own virtual machines as worker nodes. Each tenant can place their virtual machines in a dedicated Azure virtual network.
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, such as Terraform.
Prepare the Tenant infrastructure:
```
az group create \
--name $TENANT_RG \
--location $KAMAJI_REGION
az network nsg create \
--resource-group $TENANT_RG \
--name $TENANT_NSG
az network nsg rule create \
--resource-group $TENANT_RG \
--nsg-name $TENANT_NSG \
--name $TENANT_NSG-ssh \
--protocol tcp \
--priority 1000 \
--destination-port-range 22 \
--access allow
az network vnet create \
--resource-group $TENANT_RG \
--name $TENANT_VNET_NAME \
--address-prefix $TENANT_VNET_ADDRESS \
--subnet-name $TENANT_SUBNET_NAME \
--subnet-prefix $TENANT_SUBNET_ADDRESS
az network vnet subnet create \
--resource-group $TENANT_RG \
--vnet-name $TENANT_VNET_NAME \
--name $TENANT_SUBNET_NAME \
--address-prefixes $TENANT_SUBNET_ADDRESS \
--network-security-group $TENANT_NSG
```
Connection between the Tenant virtual network and the Kamaji AKS virtual network leverages [Azure Network Peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview).
Enable the network peering between the Tenant Virtual Network and the Kamaji AKS Virtual Network:
Create an Azure Virtual Machine Scale Set to host worker nodes
```bash
KAMAJI_VNET_NAME=`az network vnet list -g $KAMAJI_NODE_RG --query [].name --out tsv`
KAMAJI_VNET_ID=`az network vnet list -g $KAMAJI_NODE_RG --query [].id --out tsv`
TENANT_VNET_ID=`az network vnet list -g $TENANT_RG --query [].id --out tsv`
az network vnet peering create \
--resource-group $TENANT_RG \
--name $TENANT_NAME-$KAMAJI_CLUSTER \
--vnet-name $TENANT_VNET_NAME \
--remote-vnet $KAMAJI_VNET_ID \
--allow-vnet-access
az network vnet peering create \
--resource-group $KAMAJI_NODE_RG \
--name $KAMAJI_CLUSTER-$TENANT_NAME \
az network vnet subnet create \
--resource-group $KAMAJI_RG \
--name $TENANT_SUBNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--remote-vnet $TENANT_VNET_ID \
--allow-vnet-access
```
--address-prefixes $TENANT_SUBNET_ADDRESS
[Azure Network Security Groups](https://docs.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) can be used to control the traffic between the Tenant virtual network and the Kamaji AKS virtual network for stronger isolation. See the required [ports and protocols](https://kubernetes.io/docs/reference/ports-and-protocols/) between the Kubernetes control plane and worker nodes.
### Create the tenant virtual machines
Create an Azure Virtual Machine Scale Set to host virtual machines
```
az vmss create \
--name $TENANT_VMSS \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--image $TENANT_VM_IMAGE \
--public-ip-per-vm \
--vnet-name $TENANT_VNET_NAME \
--vnet-name $KAMAJI_VNET_NAME \
--subnet $TENANT_SUBNET_NAME \
--ssh-key-value @~/.ssh/id_rsa.pub \
--computer-name-prefix $TENANT_NAME- \
--nsg $TENANT_NSG \
--custom-data ./tenant-cloudinit.yaml \
--instance-count 0
--load-balancer "" \
--instance-count 0
az vmss update \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--set virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].enableIPForwarding=true
az vmss scale \
--resource-group $TENANT_RG \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--new-capacity 3
```
### Join the tenant virtual machines to the tenant control plane
The current approach for joining nodes is the `kubeadm` one; therefore, we will create a bootstrap token to perform the action:
### Join worker nodes
The current approach for joining nodes is to use `kubeadm`; therefore, we will create a bootstrap token to perform the action. To make this step easier, we will store the entire join command in a variable:
```bash
TENANT_ADDR=$(kubectl -n ${TENANT_NAMESPACE} get svc ${TENANT_NAME} -o json | jq -r '.spec.loadBalancerIP')
JOIN_CMD=$(echo "sudo kubeadm join ${TENANT_ADDR}:6443 ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command |cut -d" " -f4-)
```
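Before running it on the nodes, it is worth printing the assembled command to check that the address and token look sane:

```bash
# Expected shape: sudo kubeadm join <address>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
echo ${JOIN_CMD}
```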
A bash loop will be used to join all the available nodes.
```bash
HOSTS=($(az vmss list-instance-public-ips \
--resource-group $TENANT_RG \
--name $TENANT_VMSS \
--query "[].ipAddress" \
--output tsv))
VMIDS=($(az vmss list-instances \
--resource-group $KAMAJI_RG \
--name $TENANT_VMSS \
--query [].instanceId \
--output tsv))
for i in ${!HOSTS[@]}; do
HOST=${HOSTS[$i]}
echo $HOST
ssh ${USER}@${HOST} -t ${JOIN_CMD};
for i in ${!VMIDS[@]}; do
VMID=${VMIDS[$i]}
az vmss run-command create \
--name join-tenant-control-plane \
--vmss-name $TENANT_VMSS \
--resource-group $KAMAJI_RG \
--instance-id ${VMID} \
--script "${JOIN_CMD}"
done
```
@@ -369,51 +339,43 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-000000 NotReady <none> 112s v1.23.5
tenant-00-000002 NotReady <none> 92s v1.23.5
tenant-00-000003 NotReady <none> 71s v1.23.5
tenant-00-000000 NotReady <none> 112s v1.25.0
tenant-00-000002 NotReady <none> 92s v1.25.0
tenant-00-000003 NotReady <none> 71s v1.25.0
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your choice.
Download the latest stable Calico manifest:
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico-azure.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```
And after a while, `kube-system` pods will be running.
As per the [documentation](https://projectcalico.docs.tigera.io/reference/public-cloud/azure), Calico in VXLAN mode is supported on Azure, while IPIP packets are blocked by the Azure network fabric. Make sure you edit the manifest above and set the following variables (a one-liner sketch follows the list):
- `CLUSTER_TYPE="k8s"`
- `CALICO_IPV4POOL_IPIP="Never"`
- `CALICO_IPV4POOL_VXLAN="Always"`
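A possible one-liner for these edits, assuming the default `calico.yaml` layout where each `value:` sits on the line right after its `name:`, and GNU sed:

```bash
# Switch Calico to VXLAN-only encapsulation for Azure (assumes default manifest values)
sed -i \
  -e 's/value: "k8s,bgp"/value: "k8s"/' \
  -e '/CALICO_IPV4POOL_IPIP/{n;s/"Always"/"Never"/}' \
  -e '/CALICO_IPV4POOL_VXLAN/{n;s/"Never"/"Always"/}' \
  calico.yaml
```

Double-check the resulting manifest before applying it, since the exact defaults can change between Calico releases.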
Apply to the tenant cluster:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
calico-node-kxf6n 1/1 Running 0 3m
calico-node-qtdlw 1/1 Running 0 3m
coredns-64897985d-2v5lc 1/1 Running 0 5m
coredns-64897985d-nq276 1/1 Running 0 5m
kube-proxy-cwdww 1/1 Running 0 3m
kube-proxy-m48v4 1/1 Running 0 3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```
And the nodes will be ready
And after a while, the nodes will be ready
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-000000 Ready <none> 3m38s v1.23.5
tenant-00-000002 Ready <none> 3m18s v1.23.5
tenant-00-000003 Ready <none> 2m57s v1.23.5
tenant-00-000000 Ready <none> 3m38s v1.25.0
tenant-00-000002 Ready <none> 3m18s v1.25.0
tenant-00-000003 Ready <none> 2m57s v1.25.0
```
## Cleanup
To get rid of the Tenant infrastructure, remove the resource group:
```bash
az group delete --name $TENANT_RG --yes --no-wait
```
To get rid of the Kamaji infrastructure, remove the resource group:
```bash
az group delete --name $KAMAJI_RG --yes --no-wait
```


@@ -1,14 +1,19 @@
# Setup Kamaji
This guide will lead you through the process of creating a working Kamaji setup on a generic Kubernetes cluster. It requires:
# Setup Kamaji on a generic infrastructure
This guide will lead you through the process of creating a working Kamaji setup on a generic infrastructure, either virtual or bare metal.
- one bootstrap local workstation
- a Kubernetes cluster 1.22+, to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host Tenants' workloads
The material here is relatively dense. We strongly encourage you to dedicate time to walk through these instructions, with a mind to learning. We do NOT provide any "one-click" deployment here. However, once you've understood the components involved it is encouraged that you build suitable, auditable GitOps deployment processes around your final infrastructure.
> In this guide, we assume the machines are running `Ubuntu 20.04`.
The guide requires:
- one bootstrap workstation
- a Kubernetes cluster to run the Admin and Tenant Control Planes
- an arbitrary number of machines to host Tenants' workloads
## Summary
* [Prepare the bootstrap workspace](#prepare-the-bootstrap-workspace)
* [Access Admin cluster](#access-admin-cluster)
* [Install DataStore](#install-datastore)
* [Install Kamaji controller](#install-kamaji-controller)
* [Create Tenant Cluster](#create-tenant-cluster)
* [Cleanup](#cleanup)
@@ -23,10 +28,10 @@ cd kamaji/deploy
We assume you have installed on your workstation:
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
- [kubeadm](https://kubernetes.io/docs/tasks/tools/#kubeadm)
- [helm](https://helm.sh/docs/intro/install/)
- [jq](https://stedolan.github.io/jq/)
- [openssl](https://www.openssl.org/)
## Access Admin cluster
In Kamaji, an Admin Cluster is a regular Kubernetes cluster which hosts zero to many Tenant Cluster Control Planes. The admin cluster acts as a management cluster for all the Tenant clusters and implements Monitoring, Logging, and Governance of the whole Kamaji setup, including all Tenant clusters.
@@ -37,42 +42,46 @@ Throughout the following instructions, shell variables are used to indicate valu
source kamaji.env
```
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide:
Any regular and conformant Kubernetes v1.22+ cluster can be turned into a Kamaji setup. To work properly, the admin cluster should provide at least:
- CNI module installed, eg. [Calico](https://github.com/projectcalico/calico), [Cilium](https://github.com/cilium/cilium).
- CSI module installed with a Storage Class for the Tenants' `etcd`.
- CSI module installed with a Storage Class for the Tenants' `etcd`. Local Persistent Volumes are an option.
- Support for LoadBalancer Service Type, or alternatively, an Ingress Controller, eg. [ingress-nginx](https://github.com/kubernetes/ingress-nginx), [haproxy](https://github.com/haproxytech/kubernetes-ingress).
- Monitoring Stack, eg. [Prometheus](https://github.com/prometheus-community).
Make sure you have a `kubeconfig` file with admin permissions on the cluster you want to turn into Kamaji Admin Cluster.
## Install Kamaji
There are multiple ways to deploy Kamaji, including a [single YAML file](../config/install.yaml) and [Helm Chart](../charts/kamaji).
## Install datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. The Kamaji Helm Chart provides the installation of an unmanaged `etcd`. However, a managed `etcd` is highly recommended in production.
### Multi-tenant datastore
The Kamaji controller needs to access a multi-tenant datastore in order to save the data of the tenants' clusters. Install a multi-tenant `etcd` in the admin cluster as a three-replica StatefulSet with data persistence. The Helm [Chart](../charts/kamaji/) provides the installation of an internal `etcd`. However, an externally managed `etcd` is highly recommended. If you'd like to use an external one, you can specify the overrides by setting the value `etcd.deploy=false`.
Optionally, Kamaji offers the possibility of using a different storage system than `etcd` for the tenants' clusters, like MySQL compatible database, thanks to the [kine](https://github.com/k3s-io/kine) integration [here](../deploy/kine/mysql/README.md).
### Install with Helm Chart
Install with the `helm` in a dedicated namespace of the Admin cluster:
As an alternative, the [kamaji-etcd](https://github.com/clastix/kamaji-etcd) project provides a viable option to set up a managed multi-tenant `etcd` as a three-replica StatefulSet with data persistence:
```bash
helm install --create-namespace --namespace kamaji-system kamaji clastix/kamaji
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install etcd clastix/kamaji-etcd -n kamaji-system --create-namespace
```
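Once the chart is installed, the three `etcd` replicas should show up in the namespace:

```bash
kubectl -n kamaji-system get pods
```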
The Kamaji controller and the multi-tenant `etcd` are now running:
Optionally, Kamaji offers the possibility of using a different storage system for the tenants' clusters, such as a MySQL- or PostgreSQL-compatible database, thanks to the native [kine](https://github.com/k3s-io/kine) integration.
## Install Kamaji Controller
Install Kamaji with `helm` using an unmanaged `etcd` as the datastore:
```bash
kubectl -n kamaji-system get pods
NAME READY STATUS RESTARTS AGE
etcd-0 1/1 Running 0 120m
etcd-1 1/1 Running 0 120m
etcd-2 1/1 Running 0 119m
kamaji-857fcdf599-4fb2p 2/2 Running 0 120m
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace
```
You just turned your Kubernetes cluster into a Kamaji cluster to run multiple Tenant Control Planes.
Alternatively, if you opted for a managed `etcd` datastore:
```bash
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji clastix/kamaji -n kamaji-system --create-namespace --set etcd.deploy=false
```
Congratulations! You just turned your Kubernetes cluster into a Kamaji cluster capable of running multiple Tenant Control Planes.
## Create Tenant Cluster
@@ -101,17 +110,17 @@ spec:
resources:
apiServer:
requests:
cpu: 500m
cpu: 250m
memory: 512Mi
limits: {}
controllerManager:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
scheduler:
requests:
cpu: 250m
cpu: 125m
memory: 256Mi
limits: {}
service:
@@ -146,8 +155,7 @@ spec:
limits: {}
EOF
kubectl create namespace ${TENANT_NAMESPACE}
kubectl apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
kubectl -n ${TENANT_NAMESPACE} apply -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
After a few minutes, check the created resources in the tenant's namespace; when ready, the output will look similar to the following:
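A command along the following lines produces that overview (`tcp` is the short name of the `TenantControlPlane` resource):

```bash
kubectl -n ${TENANT_NAMESPACE} get tcp,deployments,services
```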
@@ -171,39 +179,8 @@ service/tenant-00 LoadBalancer 10.32.132.241 192.168.32.240 6443:32152/T
The regular Tenant Control Plane containers (`kube-apiserver`, `kube-controller-manager`, `kube-scheduler`) run unchanged in the `tcp` pods instead of on dedicated machines, and they are exposed through a service on port `6443` of the worker nodes in the Admin cluster.
```yaml
apiVersion: v1
kind: Service
metadata:
name: tenant-00
spec:
clusterIP: 10.32.233.177
loadBalancerIP: 192.168.32.240
ports:
- name: kube-apiserver
nodePort: 31073
port: 6443
protocol: TCP
targetPort: 6443
- name: konnectivity-server
nodePort: 32125
port: 8132
protocol: TCP
targetPort: 8132
selector:
kamaji.clastix.io/soot: tenant-00
type: LoadBalancer
```
The `LoadBalancer` service type is used to expose the Tenant Control Plane. However, `NodePort` and `ClusterIP` with an Ingress Controller are still viable options, depending on the case. High Availability and rolling updates of the Tenant Control Plane are provided by the `tcp` Deployment and all the resources reconciled by the Kamaji controller.
### Konnectivity
In addition to the standard control plane containers, Kamaji creates an instance of [konnectivity-server](https://kubernetes.io/docs/concepts/architecture/control-plane-node-communication/) running as sidecar container in the `tcp` pod and exposed on port `8132` of the `tcp` service.
This is required when the tenant worker nodes are not reachable from the `tcp` pods. The Konnectivity service consists of two parts: the Konnectivity server in the tenant control plane pod and the Konnectivity agents running on the tenant worker nodes. After worker nodes have joined the tenant control plane, the Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections. After enabling the Konnectivity service, all control plane to worker nodes traffic goes through these connections.
> In Kamaji, Konnectivity is enabled by default and can be disabled when not required.
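As a sketch, assuming the addon is defined at `spec.addons.konnectivity` of the `TenantControlPlane` resource, it could be disabled with a JSON patch:

```bash
# Removes the konnectivity addon from the tenant control plane definition
kubectl -n ${TENANT_NAMESPACE} patch tcp ${TENANT_NAME} --type=json \
  -p '[{"op": "remove", "path": "/spec/addons/konnectivity"}]'
```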
### Working with Tenant Control Plane
Collect the external IP address of the `tcp` service:
@@ -224,7 +201,7 @@ The `kubeconfig` required to access the Tenant Control Plane is stored in a secr
```bash
kubectl get secrets -n ${TENANT_NAMESPACE} ${TENANT_NAME}-admin-kubeconfig -o json \
| jq -r '.data["admin.conf"]' \
| base64 -d \
| base64 --decode \
> ${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
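As a quick check that the retrieved `kubeconfig` works:

```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig cluster-info
```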
@@ -255,38 +232,36 @@ kubernetes 192.168.32.240:6443 18m
And make sure it is `${TENANT_ADDR}:${TENANT_PORT}`.
### Preparing Worker Nodes to join
### Prepare worker nodes to join
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other IaC tools.
Currently Kamaji does not provide any helper for the creation of tenant worker nodes. You should get a set of machines from your infrastructure provider, turn them into worker nodes, and then join them to the tenant control plane with `kubeadm`. In the future, we'll provide integration with Cluster API and other tools, such as Terraform.
Use bash script `nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
You can use the provided helper script `/deploy/nodes-prerequisites.sh` to install the dependencies on all the worker nodes:
- Install `containerd` as container runtime
- Install `crictl`, the command line for working with `containerd`
- Install `kubectl`, `kubelet`, and `kubeadm` in the desired version
> Warning: we assume worker nodes are machines running `Ubuntu 20.04`
> Warning: the script assumes all worker nodes are running `Ubuntu 20.04`. Make sure to adapt the script if you're using a different distribution.
Run the installation script:
Run the script:
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
./nodes-prerequisites.sh ${TENANT_VERSION:1} ${HOSTS[@]}
```
### Join Command
The current approach for joining nodes is the `kubeadm` one; therefore, we will create a bootstrap token to perform the action. To make this step easier, we will store the entire join command in a variable.
### Join worker nodes
The current approach for joining nodes is to use `kubeadm`; therefore, we will create a bootstrap token to perform the action. To make this step easier, we will store the entire join command in a variable:
```bash
JOIN_CMD=$(echo "sudo ")$(kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token create --print-join-command)
```
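You can confirm the bootstrap token has been created against the tenant control plane:

```bash
kubeadm --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig token list
```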
### Adding Worker Nodes
A bash loop will be used to join all the available nodes.
```bash
HOSTS=(${WORKER0} ${WORKER1} ${WORKER2})
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
ssh ${USER}@${HOST} -t ${JOIN_CMD};
@@ -299,163 +274,42 @@ Checking the nodes:
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-worker-00 NotReady <none> 25s v1.23.5
tenant-00-worker-01 NotReady <none> 17s v1.23.5
tenant-00-worker-02 NotReady <none> 9s v1.23.5
tenant-00-worker-00 NotReady <none> 25s v1.25.0
tenant-00-worker-01 NotReady <none> 17s v1.25.0
tenant-00-worker-02 NotReady <none> 9s v1.25.0
```
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In our case, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico).
The cluster needs a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) plugin to get the nodes ready. In this guide, we are going to install [calico](https://projectcalico.docs.tigera.io/about/about-calico), but feel free to use one of your taste.
Download the latest stable Calico manifest:
```bash
kubectl apply -f calico-cni/calico-crd.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl apply -f calico-cni/calico.yaml --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
```
And after a while, `kube-system` pods will be running.
Before applying the Calico manifest, you can customize it as necessary according to your preferences.
Apply to the tenant cluster:
```bash
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-8594699699-dlhbj 1/1 Running 0 3m
calico-node-kxf6n 1/1 Running 0 3m
calico-node-qtdlw 1/1 Running 0 3m
coredns-64897985d-2v5lc 1/1 Running 0 5m
coredns-64897985d-nq276 1/1 Running 0 5m
kube-proxy-cwdww 1/1 Running 0 3m
kube-proxy-m48v4 1/1 Running 0 3m
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig apply -f calico.yaml
```
And the nodes will be ready
And after a while, the nodes will be ready
```bash
kubectl get nodes --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
tenant-00-worker-00 Ready <none> 2m48s v1.23.5
tenant-00-worker-01 Ready <none> 2m40s v1.23.5
tenant-00-worker-02 Ready <none> 2m32s v1.23.5
```
## Smoke test
The tenant cluster is now ready to accept workloads.
Export its `kubeconfig` file
```bash
export KUBECONFIG=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
```
#### Deployment
Deploy a `nginx` application on the tenant cluster
```bash
kubectl create deployment nginx --image=nginx
```
and check the `nginx` pod gets scheduled
```bash
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx-6799fc88d8-4sgcb 1/1 Running 0 33s 172.12.121.1 worker02
```
#### Port Forwarding
Verify the ability to access applications remotely using port forwarding.
Retrieve the full name of the `nginx` pod:
```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Forward port 8080 on your local machine to port 80 of the `nginx` pod:
```bash
kubectl port-forward $POD_NAME 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```
In a new terminal make an HTTP request using the forwarding address:
```bash
curl --head http://127.0.0.1:8080
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 08:19:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
```
Switch back to the previous terminal and stop the port forwarding to the `nginx` pod.
#### Logs
Verify the ability to retrieve container logs.
Print the `nginx` pod logs:
```bash
kubectl logs $POD_NAME
...
127.0.0.1 - - [19/Jun/2021:08:19:01 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.68.0" "-"
```
#### Kubelet tunnel
Verify the ability to execute commands in a container.
Print the `nginx` version by executing the `nginx -v` command in the `nginx` container:
```bash
kubectl exec -ti $POD_NAME -- nginx -v
nginx version: nginx/1.21.0
```
#### Services
Verify the ability to expose applications using a service.
Expose the `nginx` deployment using a `NodePort` service:
```bash
kubectl expose deployment nginx --port 80 --type NodePort
```
Retrieve the node port assigned to the `nginx` service:
```bash
NODE_PORT=$(kubectl get svc nginx \
--output=jsonpath='{range .spec.ports[0]}{.nodePort}')
```
Retrieve the IP address of a worker instance and make an HTTP request:
```bash
curl -I http://${WORKER0}:${NODE_PORT}
HTTP/1.1 200 OK
Server: nginx/1.21.0
Date: Sat, 19 Jun 2021 09:29:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 May 2021 12:28:56 GMT
Connection: keep-alive
ETag: "60aced88-264"
Accept-Ranges: bytes
tenant-00-worker-00 Ready <none> 2m48s v1.25.0
tenant-00-worker-01 Ready <none> 2m40s v1.25.0
tenant-00-worker-02 Ready <none> 2m32s v1.25.0
```
## Cleanup
Remove the worker nodes that joined the tenant control plane
```bash
kubectl delete nodes --all --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig delete nodes --all
```
For each worker node, log in and clean it up
@@ -475,3 +329,5 @@ Delete the tenant control plane from kamaji
```bash
kubectl delete -f ${TENANT_NAMESPACE}-${TENANT_NAME}-tcp.yaml
```
That's all folks!


@@ -0,0 +1,5 @@
# MySQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration. One of the implementations is [MySQL](https://www.mysql.com/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.
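For orientation only, kine accepts MySQL endpoints in the usual Go DSN style; every value below is a placeholder:

```bash
# Illustrative endpoint: credentials, host, and database name are placeholders
DATASTORE_ENDPOINT="mysql://kamaji:password@tcp(mysql.kamaji-system.svc:3306)/kamaji"
```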


@@ -0,0 +1,6 @@
# PostgreSQL as Kubernetes Storage
Kamaji offers the possibility of having a different storage system than `etcd` thanks to [kine](https://github.com/k3s-io/kine) integration.
One of the implementations is [PostgreSQL](https://www.postgresql.org/).
> A detailed guide for production setup will be released soon. Please refer to [Getting Started Guide](../getting-started.md) for a demo setup with KinD.
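For orientation only, kine-style PostgreSQL endpoints look like the following; every value is a placeholder:

```bash
# Illustrative endpoint: credentials, host, and database name are placeholders
DATASTORE_ENDPOINT="postgres://kamaji:password@postgres.kamaji-system.svc:5432/kamaji"
```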


@@ -0,0 +1,13 @@
# Tenant Cluster Upgrade
The process of upgrading a _“tenant cluster”_ consists of two steps:
1. Upgrade the Tenant Control Plane
2. Upgrade of Tenant Worker Nodes
## Upgrade of Tenant Control Plane
You should patch the `spec.kubernetes.version` field of the `TenantControlPlane` custom resource with a new compatible value, according to the [Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/); see the sketch below.
> Note: during the upgrade, a new ReplicaSet of the Tenant Control Plane pods will be created, so make sure you run at least two replicas to avoid service disruption.
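A minimal sketch using the `tcp` short name and the shell variables from the deployment guides (`NEW_VERSION` is a placeholder for the target release):

```bash
kubectl -n ${TENANT_NAMESPACE} patch tcp ${TENANT_NAME} --type=merge \
  -p "{\"spec\": {\"kubernetes\": {\"version\": \"${NEW_VERSION}\"}}}"
```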
## Upgrade of Tenant Worker Nodes
As Kamaji currently does not provide any helpers for Tenant Worker Nodes, you should upgrade them manually, for example with the help of `kubeadm`; a rough sketch follows below. We have Cluster API support on the roadmap, so that you can upgrade _“tenant clusters”_ in a fully declarative way.
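A per-node sketch with `kubeadm` on Ubuntu; the package versions, the node name, and the drain step are illustrative and must be adapted to your environment:

```bash
# From a machine with access to the tenant cluster: drain the node first
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig drain ${NODE} --ignore-daemonsets

# On the node itself: upgrade kubeadm, the node configuration, then the kubelet
sudo apt-get update && sudo apt-get install -y kubeadm=1.25.0-00
sudo kubeadm upgrade node
sudo apt-get install -y kubelet=1.25.0-00
sudo systemctl restart kubelet

# Back on the workstation: make the node schedulable again
kubectl --kubeconfig=${TENANT_NAMESPACE}-${TENANT_NAME}.kubeconfig uncordon ${NODE}
```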


docs/content/index.md Normal file
@@ -0,0 +1,51 @@
# Kamaji
**Kamaji** deploys and operates Kubernetes at scale with a fraction of the operational burden.
## How it works
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. What makes Kamaji special is that Control Planes of _“tenant clusters”_ are just regular pods running in the _“admin cluster”_ instead of dedicated Virtual Machines. This solution makes running control planes at scale cheaper and easier to deploy and operate. View [Concepts](concepts.md) for a deeper understanding of principles behind Kamaji's design.
![Architecture](images/kamaji-light.png#gh-light-mode-only)
![Architecture](images/kamaji-dark.png#gh-dark-mode-only)
All the tenant clusters built with Kamaji are fully compliant [CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) and are compatible with the standard toolchains everybody knows and loves.
<p align="center" style="padding: 6px 6px">
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="200" />
</p>
## Features
- **Self Service Kubernetes:** leave users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** leave tenants to access the control plane with admin permissions while keeping the tenant isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full API compliance:** tenant clusters are fully CNCF compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji-provisioned cluster and a dedicated cluster.
## Getting started
Please refer to the [Getting Started guide](getting-started.md) to deploy a minimal setup of Kamaji on [KinD](https://kind.sigs.k8s.io/).
## Open Source
Kamaji is Open Source with Apache 2 license and any contribution is welcome. Open an issue or suggest an enhancement on the GitHub [project's page](https://github.com/clastix/kamaji). Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
## FAQs
Q. What does Kamaji mean?
A. Kamaji is named as the character _Kamaji_ from the Japanese movie [_Spirited Away_](https://en.wikipedia.org/wiki/Spirited_Away).
Q. Is Kamaji another Kubernetes distribution?
A. No, Kamaji is a Kubernetes Operator you can install on top of any Kubernetes cluster to provide hundreds or thousands of managed Kubernetes clusters as a service. We tested Kamaji on vanilla Kubernetes 1.22+, KinD, and Azure AKS. We expect it to work smoothly on other Kubernetes distributions. The tenant clusters made with Kamaji are conformant CNCF Kubernetes clusters as we leverage [`kubeadm`](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/).
Q. Is it safe to run Kubernetes control plane components in a pod instead of dedicated virtual machines?
A. Yes, the tenant control plane components are packaged in the same way they are running in bare metal or virtual nodes. We leverage the `kubeadm` code to set up the control plane components as they were running on their own server. The unchanged images of upstream `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are used.
Q. You already provide a Kubernetes multi-tenancy solution with [Capsule](https://capsule.clastix.io). Why does Kamaji matter?
A. A multi-tenancy solution like Capsule shares the Kubernetes control plane among all tenants, keeping tenant namespaces isolated by policies. While that solution strikes the right balance between features and ease of use, there are cases where a tenant user requires access to the control plane, for example, when a tenant needs to manage CRDs on their own. With Kamaji, you can give cluster admin permissions to the tenant.
Q. Well, you convinced me, how can I give it a try?
A. It is possible to get started with Kamaji on a laptop with [KinD](getting-started.md) installed.


@@ -39,76 +39,3 @@ Available environment variables are:
| `KAMAJI_HEALTH_PROBE_BIND_ADDRESS` | The address the probe endpoint binds to. (default ":8081") |
| `KAMAJI_LEADER_ELECTION` | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. |
| `KAMAJI_TMP_DIRECTORY` | Directory which will be used to work with temporary files. (default "/tmp/kamaji") |
## Build and deploy
Clone the repo on your workstation.
```bash
## Install dependencies
$ go mod tidy
## Generate code
$ make generate
## Generate Manifests
$ make manifests
## Install Manifests
$ make install
## Build Docker Image
$ IMG=<image name and tag> make docker-build
## Push Docker Image
$ IMG=<image name and tag> make docker-push
## Deploy Kamaji
$ IMG=<image name and tag> make deploy
## YAML Installation File
$ make yaml-installation-file
```
It will generate a YAML installation file at `config/install.yaml`, which should be customized accordingly.
## Tenant Control Planes
**Kamaji** offers a [CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to provide a declarative approach to managing tenant control planes. This *CRD* is called `TenantControlPlane`, or `tcp` for short. Use the command `kubectl explain tcp.spec` to understand the fields and their usage.
### Add-ons
**Kamaji** provides optional installations into the deployed tenant control plane through add-ons. It is possible to enable/disable them through the `tcp` definition.
### Core DNS
```yaml
addons:
coreDNS: {}
```
### Kube-Proxy
```yaml
addons:
kubeProxy: {}
```
### Konnectivity
```yaml
addons:
konnectivity:
proxyPort: 31132 # mandatory
version: v0.0.31
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
serverImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server
agentImage: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent
```


@@ -0,0 +1,70 @@
# Conformance
For organizations using Kubernetes, conformance enables interoperability, consistency, and confirmability between Kubernetes installations. The Cloud Native Computing Foundation (CNCF) provides the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance/).
The standard set of conformance tests is currently those defined by the `[Conformance]` tag in the
[kubernetes e2e](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) suite.
All the _“tenant clusters”_ built with Kamaji are CNCF conformant:
- [v1.23](https://github.com/cncf/k8s-conformance/pull/2194)
- [v1.24](https://github.com/cncf/k8s-conformance/pull/2193)
- [v1.25](https://github.com/cncf/k8s-conformance/pull/2188)
<p align="left" style="padding: 6px 6px">
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" width="100" />
</p>
## Running the conformance tests
The standard tool for running CNCF conformance tests is [Sonobuoy](https://github.com/vmware-tanzu/sonobuoy). Sonobuoy is
regularly built and kept up to date to execute against all currently supported versions of kubernetes.
Download a [binary release](https://github.com/vmware-tanzu/sonobuoy/releases) of the CLI.
Make sure to access your tenant cluster:
```
export KUBECONFIG=tenant.kubeconfig
```
Deploy a Sonobuoy pod to your tenant cluster with:
```
sonobuoy run --mode=certified-conformance
```
> You can run the command synchronously by adding the flag `--wait` but be aware that running the conformance tests can take an hour or more.
View actively running pods:
```
sonobuoy status
```
To inspect the logs:
```
sonobuoy logs -f
```
Once `sonobuoy status` shows the run as `completed`, copy the output directory from the main Sonobuoy pod to a local directory:
```
outfile=$(sonobuoy retrieve)
```
This copies a single `.tar.gz` snapshot from the Sonobuoy pod into your local
`.` directory. Extract the contents into `./results` with:
```
mkdir ./results; tar xzf $outfile -C ./results
```
To clean up Kubernetes objects created by Sonobuoy, run:
```
sonobuoy delete
```


@@ -0,0 +1,2 @@
# Reference
This section of the Kamaji documentation contains references to the project's specifications.


@@ -1,8 +1,10 @@
# Versioning and support
# Versioning
In Kamaji, there are different components that might require independent versioning and support level:
|Kamaji|Admin Cluster|Tenant Cluster (min)|Tenant Cluster (max)|Konnectivity|Tenant etcd |
|------|-------------|--------------------|--------------------|------------|------------|
|0.0.1 |1.22.0+ |1.21.0 |1.23.x |0.0.32 |3.5.4 |
|0.0.1 |1.22.0+ |1.21.0 |1.23.5 |0.0.31 |3.5.4 |
|0.0.2 |1.22.0+ |1.21.0 |1.25.0 |0.0.32 |3.5.4 |
Other combinations might work but have not been tested.
Other combinations might work but they have not been tested yet.

docs/content/use-cases.md Normal file
@@ -0,0 +1,11 @@
# Use Cases
The Kamaji project was initially started as a solution to real and common problems, such as minimizing the Total Cost of Ownership while running Kubernetes at large scale. However, it can open up a wider range of use cases.
Here are a few:
- **Managed Kubernetes:** enable companies to provide Cloud Native Infrastructure with ease by introducing a strong separation of concerns between management and workloads. Centralize cluster management, monitoring, and observability while leaving developers to focus on applications, increasing productivity and reducing operational costs.
- **Kubernetes as a Service:** provide Kubernetes clusters in a self-service fashion by running management and workloads on different infrastructures with the option of Bring Your Own Device, BYOD.
- **Control Plane as a Service:** provide multiple Kubernetes control planes running on top of a single Kubernetes cluster. Tenants who use namespace-based isolation often still need access to cluster-wide resources like Cluster Roles, Admission Webhooks, or Custom Resource Definitions.
- **Edge Computing:** distribute Kubernetes workloads across edge computing locations without having to manage multiple clusters across various providers. Centralize management of hundreds of control planes while leaving workloads to run isolated on their own dedicated infrastructure.
- **Cluster Simulation:** check a new Kubernetes API, an experimental flag, or a new tool without impacting production operations. Kamaji will let you simulate such things in a safe and controlled environment.
- **Workloads Testing:** check the behaviour of your workloads on different and multiple versions of Kubernetes with ease by deploying multiple Control Planes in a single cluster.

docs/mkdocs.yml Normal file
@@ -0,0 +1,61 @@
site_name: Kamaji
repo_name: clastix/kamaji
repo_url: https://github.com/clastix/kamaji
site_url: https://kamaji.clastix.io/
docs_dir: content
site_dir: site
theme:
name: material
features:
- navigation.tabs
- navigation.tabs.sticky
- navigation.instant
- navigation.sections
include_sidebar: true
palette:
# Palette toggle for automatic mode
- media: "(prefers-color-scheme)"
toggle:
icon: material/brightness-auto
name: Switch to light mode
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
toggle:
icon: material/lightbulb
name: Switch to dark mode
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
toggle:
icon: material/lightbulb-outline
name: Switch to system preference
# Generate navigation bar
nav:
- 'Kamaji': index.md
- 'Getting started': getting-started.md
- 'Concepts': concepts.md
- 'Guides':
- guides/index.md
- guides/kamaji-deployment-guide.md
- guides/kamaji-azure-deployment-guide.md
- guides/postgresql-datastore.md
- guides/mysql-datastore.md
- guides/upgrade.md
- 'Use Cases': use-cases.md
- 'Reference':
- reference/index.md
- reference/configuration.md
- reference/conformance.md
- reference/versioning.md
- reference/api.md
- 'Contribute':
- contribute/index.md
- contribute/guidelines.md
- contribute/governance.md

docs/requirements.txt Normal file
@@ -0,0 +1,2 @@
mkdocs>=1.3.0
mkdocs-material>=8.2.8

docs/runtime.txt Normal file
@@ -0,0 +1 @@
3.8

docs/templates/reference-cr.tmpl vendored Normal file
@@ -0,0 +1,94 @@
# API Reference
Packages:
{{range .Groups}}
- [{{.Group}}/{{.Version}}](#{{ anchorize (printf "%s/%s" .Group .Version) }})
{{- end -}}{{/* range .Groups */}}
{{- range .Groups }}
{{- $group := . }}
# {{.Group}}/{{.Version}}
Resource Types:
{{range .Kinds}}
- [{{.Name}}](#{{ anchorize .Name }})
{{end}}{{/* range .Kinds */}}
{{range .Kinds}}
{{$kind := .}}
## {{.Name}}
{{range .Types}}
{{if not .IsTopLevel}}
### {{.Name}}
{{end}}
{{.Description}}
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody>
{{- if .IsTopLevel -}}
<tr>
<td><b>apiVersion</b></td>
<td>string</td>
<td>{{$group.Group}}/{{$group.Version}}</td>
<td>true</td>
</tr>
<tr>
<td><b>kind</b></td>
<td>string</td>
<td>{{$kind.Name}}</td>
<td>true</td>
</tr>
<tr>
<td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta">metadata</a></b></td>
<td>object</td>
<td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
<td>true</td>
</tr>
{{- end -}}
{{- range .Fields -}}
<tr>
<td><b>{{if .TypeKey}}<a href="#{{.TypeKey}}">{{.Name}}</a>{{else}}{{.Name}}{{end}}</b></td>
<td>{{.Type}}</td>
<td>
{{.Description}}<br/>
{{- if or .Schema.Format .Schema.Enum .Schema.Default .Schema.Minimum .Schema.Maximum }}
<br/>
{{- end}}
{{- if .Schema.Format }}
<i>Format</i>: {{ .Schema.Format }}<br/>
{{- end }}
{{- if .Schema.Enum }}
<i>Enum</i>: {{ .Schema.Enum | toStrings | join ", " }}<br/>
{{- end }}
{{- if .Schema.Default }}
<i>Default</i>: {{ .Schema.Default }}<br/>
{{- end }}
{{- if .Schema.Minimum }}
<i>Minimum</i>: {{ .Schema.Minimum }}<br/>
{{- end }}
{{- if .Schema.Maximum }}
<i>Maximum</i>: {{ .Schema.Maximum }}<br/>
{{- end }}
</td>
<td>{{.Required}}</td>
</tr>
{{- end -}}
</tbody>
</table>
{{- end}}{{/* range .Types */}}
{{- end}}{{/* range .Kinds */}}
{{- end}}{{/* range .Groups */}}

go.mod
@@ -21,10 +21,8 @@ require (
k8s.io/apiserver v0.25.0
k8s.io/client-go v0.25.0
k8s.io/cluster-bootstrap v0.0.0
k8s.io/component-base v0.25.0
k8s.io/kube-proxy v0.0.0
k8s.io/kubelet v0.0.0
k8s.io/kubernetes v1.23.5
k8s.io/kubernetes v1.25.0
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
sigs.k8s.io/controller-runtime v0.11.0
)
@@ -44,7 +42,7 @@ require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -53,7 +51,7 @@ require (
github.com/containerd/cgroups v1.0.1 // indirect
github.com/containerd/containerd v1.5.9 // indirect
github.com/coredns/caddy v1.1.0 // indirect
github.com/coredns/corefile-migration v1.0.14 // indirect
github.com/coredns/corefile-migration v1.0.17 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@@ -106,7 +104,7 @@ require (
github.com/nxadm/tail v1.4.8 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.0.2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -154,9 +152,11 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.25.0 // indirect
k8s.io/cli-runtime v0.25.0 // indirect
k8s.io/component-base v0.25.0 // indirect
k8s.io/klog/v2 v2.70.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
k8s.io/system-validators v1.6.0 // indirect
k8s.io/kube-proxy v0.0.0 // indirect
k8s.io/system-validators v1.7.0 // indirect
mellium.im/sasl v0.3.0 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/kustomize/api v0.12.1 // indirect

go.sum
@@ -1,5 +1,4 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -53,19 +52,14 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY=
cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
@@ -74,22 +68,15 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0=
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
@@ -103,7 +90,6 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
@@ -116,7 +102,6 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -124,17 +109,13 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -147,14 +128,10 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
@@ -172,9 +149,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -183,8 +160,8 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -199,7 +176,6 @@ github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51/go.mod h1:eXthEFrGJvWH
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -220,6 +196,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -229,7 +206,6 @@ github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
@@ -289,8 +265,8 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/corefile-migration v1.0.14 h1:Tz3WZhoj2NdP8drrQH86NgnCng+VrPjNeg2Oe1ALKag=
github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
github.com/coredns/corefile-migration v1.0.17 h1:tNwh8+4WOANV6NjSljwgW7qViJfhvPUt1kosj4rR8yg=
github.com/coredns/corefile-migration v1.0.17/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -315,6 +291,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -322,7 +299,6 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -334,7 +310,6 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo=
github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -350,7 +325,6 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
@@ -366,19 +340,13 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -386,7 +354,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
@@ -420,7 +387,6 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
github.com/go-pg/pg/v10 v10.10.6 h1:1vNtPZ4Z9dWUw/TjJwOfFUbF5nEq1IkR6yG8Mq/Iwso=
github.com/go-pg/pg/v10 v10.10.6/go.mod h1:GLmFXufrElQHf5uzM3BQlcfwV3nsgnHue5uzjQ6Nqxg=
github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU=
@@ -436,7 +402,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -448,9 +414,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -485,16 +449,10 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A=
github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE=
github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -548,17 +506,12 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720 h1:zC34cGQu69FG7qzJ3WiKW244WfhDC3xxYMeNOX2gtUQ=
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -593,8 +546,6 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -607,15 +558,12 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -632,8 +580,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -652,12 +598,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -672,29 +616,24 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
@@ -705,7 +644,6 @@ github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdx
github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -715,7 +653,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@@ -725,7 +662,6 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
@@ -736,14 +672,11 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -752,7 +685,6 @@ github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -775,8 +707,9 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -787,11 +720,11 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -830,7 +763,6 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -846,14 +778,9 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -861,8 +788,8 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -874,7 +801,6 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -887,14 +813,12 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z
github.com/spf13/afero v1.7.0 h1:xc1yh8vgcNB8yQ+UqY4cpD56Ogo573e+CJ/C4YmMFTg=
github.com/spf13/afero v1.7.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -906,17 +830,14 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -941,7 +862,6 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -956,7 +876,6 @@ github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vb
github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -971,7 +890,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
@@ -981,15 +899,11 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc=
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg=
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4=
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0=
@@ -1037,7 +951,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1046,24 +959,17 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
@@ -1071,8 +977,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1089,15 +993,12 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -1156,13 +1057,10 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -1177,8 +1075,6 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -1205,7 +1101,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1258,7 +1153,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1284,7 +1178,6 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1293,15 +1186,15 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210923061019-b8560ed6a9b7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1311,7 +1204,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1328,15 +1220,12 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1361,7 +1250,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1396,7 +1284,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1406,12 +1293,6 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1434,8 +1315,6 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
@@ -1445,7 +1324,6 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0 h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
@@ -1504,7 +1382,6 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
@@ -1523,7 +1400,6 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@@ -1588,12 +1464,10 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -1604,7 +1478,6 @@ gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1645,62 +1518,36 @@ k8s.io/cli-runtime v0.25.0 h1:XBnTc2Fi+w818jcJGzhiJKQuXl8479sZ4FhtV5hVJ1Q=
k8s.io/cli-runtime v0.25.0/go.mod h1:bHOI5ZZInRHhbq12OdUiYZQN8ml8aKZLwQgt9QlLINw=
k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E=
k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8=
k8s.io/cloud-provider v0.25.0/go.mod h1:afVfVCIYOUER914WmSp0QpAtJn12gv4qu9NMT4XBxZo=
k8s.io/cluster-bootstrap v0.25.0 h1:KJ2/r0dV+bLfTK5EBobAVKvjGel3N4Qqh3bvnzh9qPk=
k8s.io/cluster-bootstrap v0.25.0/go.mod h1:x/TCtY3EiuR/rODkA3SvVQT3uSssQLf9cXcmSjdDTe0=
k8s.io/code-generator v0.25.0/go.mod h1:B6jZgI3DvDFAualltPitbYMQ74NjaCFxum3YeKZZ+3w=
k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y=
k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk=
k8s.io/component-helpers v0.25.0/go.mod h1:auaFj2bvb5Zmy0mLk4WJNmwP0w4e7Zk+/Tu9FFBGA20=
k8s.io/controller-manager v0.25.0/go.mod h1:QElCivPrZ64NP1Y976pkgyViZUqn6UcvjlXHiAAUGd0=
k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc=
k8s.io/csi-translation-lib v0.25.0/go.mod h1:Wb80CDywP4753F6wWkIyOuJIQtQAbhgw985veSgAn/4=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.25.0/go.mod h1:dfdl4aQkleiWK/U++UDLdDC8g2rsonhkB23zzUeBCgM=
k8s.io/kube-controller-manager v0.25.0/go.mod h1:SjL1hKSG2z9wajnvjRHZv1zOsdDHjmbZd1ykmaYO6J8=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/kube-proxy v0.25.0 h1:QuoKEyXV+NNMXEh8oqlthUlHkmWF+WBnYUMHCf817k0=
k8s.io/kube-proxy v0.25.0/go.mod h1:uHv1HwMVDYgl1pU2PTDKLRlxtNOf4z2M5YPYC6NP1CU=
k8s.io/kube-scheduler v0.25.0/go.mod h1:cwiyJeImgFbhmbnImzvuhbiJayNngRNEe3FJkZDPw9Y=
k8s.io/kubectl v0.25.0/go.mod h1:n16ULWsOl2jmQpzt2o7Dud1t4o0+Y186ICb4O+GwKAU=
k8s.io/kubelet v0.25.0 h1:eTS5B1u1o63ndExAHKLJytzz/GBy86ROcxYtu0VK3RA=
k8s.io/kubelet v0.25.0/go.mod h1:J6aQxrZdSsGPrskYrhZdEn6PCnGha+GNvF0g9aWfQnw=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/kubernetes v1.23.5 h1:bxpSv2BKc2MqYRfyqQqLVdodLZ2r+NZ/rEdZXyUAvug=
k8s.io/kubernetes v1.23.5/go.mod h1:avI3LUTUYZugxwh52KMVM7v9ZjB5gYJ6D3FIoZ1SHUo=
k8s.io/legacy-cloud-providers v0.25.0/go.mod h1:bnmUgHHeBmK3M9JgQzu+ne6UCUVURDzkpF0Y7VeypVE=
k8s.io/metrics v0.25.0/go.mod h1:HZZrbhuRX+fsDcRc3u59o2FbrKhqD67IGnoFECNmovc=
k8s.io/mount-utils v0.25.0/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0=
k8s.io/pod-security-admission v0.25.0/go.mod h1:b/UC586Th2LijoNV+ssyyAryUvmaTrEWms5ZzBEkVsA=
k8s.io/sample-apiserver v0.25.0/go.mod h1:Wyy/yKmXCrWLcc+082Vsn6fxAuwraRw5FQpekHg3go8=
k8s.io/system-validators v1.6.0 h1:21qaPNdZ+mQrm4qc5shU0T5Eh49t/miFqZsn4sW8Hr0=
k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/kubernetes v1.25.0 h1:NwTRyLrdXTORd5V7DLlUltxDbl/KZjYDiRgwI+pBYGE=
k8s.io/kubernetes v1.25.0/go.mod h1:UdtILd5Zg1vGZvShiO1EYOqmjzM2kZOG1hzwQnM5JxY=
k8s.io/system-validators v1.7.0 h1:tYD3hojdYc58CKtiEsh7BWW6Pg0x6xbclUiwjaIiiYo=
k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ=
mellium.im/sasl v0.3.0 h1:0qoaTCTo5Py7u/g0cBIQZcMOgG/5LM71nshbXwznBh8=
mellium.im/sasl v0.3.0/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
@@ -1710,12 +1557,8 @@ sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s=
sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ=
sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q=
sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk=
sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=


@@ -143,7 +143,7 @@ func (d *Deployment) buildPKIVolume(podSpec *corev1.PodSpec, tcp *kamajiv1alpha1
VolumeSource: corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: sources,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -159,7 +159,7 @@ func (d *Deployment) buildCAVolume(podSpec *corev1.PodSpec, tcp *kamajiv1alpha1.
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Certificates.CA.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -175,7 +175,7 @@ func (d *Deployment) buildSSLCertsVolume(podSpec *corev1.PodSpec, tcp *kamajiv1a
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Certificates.CA.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -191,7 +191,7 @@ func (d *Deployment) buildShareCAVolume(podSpec *corev1.PodSpec, tcp *kamajiv1al
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Certificates.CA.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -207,7 +207,7 @@ func (d *Deployment) buildLocalShareCAVolume(podSpec *corev1.PodSpec, tcp *kamaj
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Certificates.CA.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -223,7 +223,7 @@ func (d *Deployment) buildSchedulerVolume(podSpec *corev1.PodSpec, tcp *kamajiv1
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.KubeConfig.Scheduler.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -239,7 +239,7 @@ func (d *Deployment) buildControllerManagerVolume(podSpec *corev1.PodSpec, tcp *
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.KubeConfig.ControllerManager.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
},
}
@@ -542,12 +542,13 @@ func (d *Deployment) buildKubeAPIServerCommand(tenantControlPlane *kamajiv1alpha
"--kubelet-preferred-address-types": "Hostname,InternalIP,ExternalIP",
"--proxy-client-cert-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyClientCertName),
"--proxy-client-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyClientKeyName),
"--requestheader-allowed-names": "front-proxy-client",
"--requestheader-allowed-names": constants.FrontProxyClientCertCommonName,
"--requestheader-client-ca-file": path.Join(v1beta3.DefaultCertificatesDir, constants.FrontProxyCACertName),
"--requestheader-extra-headers-prefix": "X-Remote-Extra-",
"--requestheader-group-headers": "X-Remote-Group",
"--requestheader-username-headers": "X-Remote-User",
"--secure-port": fmt.Sprintf("%d", tenantControlPlane.Spec.NetworkProfile.Port),
"--service-account-issuer": fmt.Sprintf("https://localhost:%d", tenantControlPlane.Spec.NetworkProfile.Port),
"--service-account-issuer": "https://kubernetes.default.svc.cluster.local",
"--service-account-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.ServiceAccountPublicKeyName),
"--service-account-signing-key-file": path.Join(v1beta3.DefaultCertificatesDir, constants.ServiceAccountPrivateKeyName),
"--tls-cert-file": path.Join(v1beta3.DefaultCertificatesDir, constants.APIServerCertName),
@@ -564,6 +565,7 @@ func (d *Deployment) buildKubeAPIServerCommand(tenantControlPlane *kamajiv1alpha
httpsEndpoints = append(httpsEndpoints, fmt.Sprintf("https://%s", ep))
}
desiredArgs["--etcd-compaction-interval"] = "0"
desiredArgs["--etcd-prefix"] = fmt.Sprintf("/%s", tenantControlPlane.GetName())
desiredArgs["--etcd-servers"] = strings.Join(httpsEndpoints, ",")
desiredArgs["--etcd-cafile"] = "/etc/kubernetes/pki/etcd/ca.crt"
@@ -617,7 +619,7 @@ func (d *Deployment) buildKineVolume(podSpec *corev1.PodSpec, tcp *kamajiv1alpha
podSpec.Volumes[index].VolumeSource = corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: tcp.Status.Storage.Certificate.SecretName,
DefaultMode: pointer.Int32Ptr(420),
DefaultMode: pointer.Int32(420),
},
}
if d.DataStore.Spec.Driver == kamajiv1alpha1.EtcdDriver {
@@ -772,6 +774,18 @@ func (d *Deployment) SetAnnotations(resource *appsv1.Deployment, annotations map
resource.SetAnnotations(annotations)
}
func (d *Deployment) SetTopologySpreadConstraints(spec *appsv1.DeploymentSpec, topologies []corev1.TopologySpreadConstraint) {
defaultSelector := spec.Selector
for index, topology := range topologies {
if topology.LabelSelector == nil {
topologies[index].LabelSelector = defaultSelector
}
}
spec.Template.Spec.TopologySpreadConstraints = topologies
}
// ResetKubeAPIServerFlags ensures that, upon a change of the kube-apiserver extra flags, the desired ones are properly
// applied, also considering that the container could later be patched by the konnectivity addon resources.
func (d *Deployment) ResetKubeAPIServerFlags(resource *appsv1.Deployment, tcp *kamajiv1alpha1.TenantControlPlane) {
@@ -804,3 +818,15 @@ func (d *Deployment) ResetKubeAPIServerFlags(resource *appsv1.Deployment, tcp *k
resource.GetAnnotations()[apiServerFlagsAnnotation] = fmt.Sprintf("%d", len(tcp.Spec.ControlPlane.Deployment.ExtraArgs.APIServer))
}
func (d *Deployment) SetNodeSelector(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.NodeSelector = tcp.Spec.ControlPlane.Deployment.NodeSelector
}
func (d *Deployment) SetToleration(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.Tolerations = tcp.Spec.ControlPlane.Deployment.Tolerations
}
func (d *Deployment) SetAffinity(spec *corev1.PodSpec, tcp *kamajiv1alpha1.TenantControlPlane) {
spec.Affinity = tcp.Spec.ControlPlane.Deployment.Affinity
}
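
A quick standalone sketch of the LabelSelector defaulting performed by SetTopologySpreadConstraints above: constraints that omit a selector inherit the Deployment's own, so spreading targets the tenant control-plane pods by default. The label key and constraint values below are illustrative only.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setTopologySpreadConstraints re-implements the defaulting shown in the diff.
func setTopologySpreadConstraints(spec *appsv1.DeploymentSpec, topologies []corev1.TopologySpreadConstraint) {
	for index, topology := range topologies {
		if topology.LabelSelector == nil {
			topologies[index].LabelSelector = spec.Selector
		}
	}
	spec.Template.Spec.TopologySpreadConstraints = topologies
}

func main() {
	spec := &appsv1.DeploymentSpec{
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"kamaji.clastix.io/name": "tenant-00"}},
	}
	setTopologySpreadConstraints(spec, []corev1.TopologySpreadConstraint{
		{MaxSkew: 1, TopologyKey: "topology.kubernetes.io/zone", WhenUnsatisfiable: corev1.DoNotSchedule},
	})
	// The constraint now carries the Deployment selector.
	fmt.Println(spec.Template.Spec.TopologySpreadConstraints[0].LabelSelector.MatchLabels)
}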


@@ -0,0 +1,10 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package constants
const (
// Checksum is the annotation key used to store the checksum of the resource:
// comparing it reveals whether the resource has changed and must be realigned during reconciliation.
Checksum = "kamaji.clastix.io/checksum"
)
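
This diff does not show how the annotation is consumed; the following is a hypothetical sketch assuming the checksum is a SHA-256 digest of the serialized desired state. The helper name and the digest choice are assumptions, not taken from the repository.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// checksumAnnotation mirrors the Checksum constant declared above.
const checksumAnnotation = "kamaji.clastix.io/checksum"

// needsRealignment compares the checksum stored in the resource annotations
// against one computed from the desired payload: a mismatch means the resource
// drifted and must be aligned again by the reconciliation.
func needsRealignment(annotations map[string]string, desired []byte) bool {
	sum := sha256.Sum256(desired)
	return annotations[checksumAnnotation] != hex.EncodeToString(sum[:])
}

func main() {
	annotations := map[string]string{checksumAnnotation: "stale-digest"}
	fmt.Println(needsRealignment(annotations, []byte(`{"replicas":2}`))) // true: reconcile
}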


@@ -5,103 +5,31 @@ package crypto
import (
"bytes"
"crypto/rand"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
mathrand "math/rand"
"time"
"github.com/pkg/errors"
)
const (
certBitSize = 2048
)
func GetCertificateAndKeyPair(template *x509.Certificate, caCert []byte, caPrivKey []byte) (*bytes.Buffer, *bytes.Buffer, error) {
caCertBytes, err := GetCertificate(caCert)
if err != nil {
return nil, nil, err
// CheckPublicAndPrivateKeyValidity checks if the given bytes for the private and public keys are valid.
func CheckPublicAndPrivateKeyValidity(publicKey []byte, privateKey []byte) (bool, error) {
if len(publicKey) == 0 || len(privateKey) == 0 {
return false, nil
}
caPrivKeyBytes, err := GetPrivateKey(caPrivKey)
if err != nil {
return nil, nil, err
}
return GenerateCertificateKeyPairBytes(template, certBitSize, caCertBytes, caPrivKeyBytes)
}
func GetCertificate(cert []byte) (*x509.Certificate, error) {
pemContent, _ := pem.Decode(cert)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
return x509.ParseCertificate(pemContent.Bytes)
}
func GetPrivateKey(privKey []byte) (*rsa.PrivateKey, error) {
pemContent, _ := pem.Decode(privKey)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
return x509.ParsePKCS1PrivateKey(pemContent.Bytes)
}
func GetPublickKey(pubKey []byte) (*rsa.PublicKey, error) {
pemContent, _ := pem.Decode(pubKey)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
pub, err := x509.ParsePKIXPublicKey(pemContent.Bytes)
if err != nil {
return nil, err
}
return pub.(*rsa.PublicKey), nil //nolint:forcetypeassert
}
func GenerateCertificateKeyPairBytes(template *x509.Certificate, bitSize int, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*bytes.Buffer, *bytes.Buffer, error) {
certPrivKey, err := rsa.GenerateKey(rand.Reader, bitSize)
if err != nil {
return nil, nil, err
}
certBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, &certPrivKey.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
certPEM := &bytes.Buffer{}
if err := pem.Encode(certPEM, &pem.Block{
Type: "CERTIFICATE",
Headers: nil,
Bytes: certBytes,
}); err != nil {
return nil, nil, err
}
certPrivKeyPEM := &bytes.Buffer{}
if err := pem.Encode(certPrivKeyPEM, &pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
}); err != nil {
return nil, nil, err
}
return certPEM, certPrivKeyPEM, nil
}
func IsValidKeyPairBytes(pubKeyBytes []byte, privKeyBytes []byte) (bool, error) {
privKey, err := GetPrivateKey(privKeyBytes)
pubKey, err := ParsePublicKeyBytes(publicKey)
if err != nil {
return false, err
}
pubKey, err := GetPublickKey(pubKeyBytes)
privKey, err := ParsePrivateKeyBytes(privateKey)
if err != nil {
return false, err
}
@@ -109,22 +37,134 @@ func IsValidKeyPairBytes(pubKeyBytes []byte, privKeyBytes []byte) (bool, error)
return checkPublicKeys(privKey.PublicKey, *pubKey), nil
}
func IsValidCertificateKeyPairBytes(certBytes []byte, privKeyBytes []byte) (bool, error) {
cert, err := GetCertificate(certBytes)
if err != nil {
return false, err
// CheckCertificateAndPrivateKeyPairValidity checks whether the certificate and private key pair is valid.
func CheckCertificateAndPrivateKeyPairValidity(certificate []byte, privateKey []byte) (bool, error) {
switch {
case len(certificate) == 0, len(privateKey) == 0:
return false, nil
default:
return IsValidCertificateKeyPairBytes(certificate, privateKey)
}
privKey, err := GetPrivateKey(privKeyBytes)
if err != nil {
return false, err
}
return isValidCertificateKeyPairBytes(*cert, *privKey), nil
}
func isValidCertificateKeyPairBytes(cert x509.Certificate, privKey rsa.PrivateKey) bool {
return checkCertificateValidity(cert) && checkCertificateKeyPair(cert, privKey)
// GenerateCertificatePrivateKeyPair generates, starting from the Certificate Authority bytes, a certificate using
// the provided template, returning the bytes for both the certificate and its key.
func GenerateCertificatePrivateKeyPair(template *x509.Certificate, caCertificate []byte, caPrivateKey []byte) (*bytes.Buffer, *bytes.Buffer, error) {
caCertBytes, err := ParseCertificateBytes(caCertificate)
if err != nil {
return nil, nil, err
}
caPrivKeyBytes, err := ParsePrivateKeyBytes(caPrivateKey)
if err != nil {
return nil, nil, errors.Wrap(err, "provided CA private key for certificate generation cannot be parsed")
}
return generateCertificateKeyPairBytes(template, caCertBytes, caPrivKeyBytes)
}
// ParseCertificateBytes parses the given certificate bytes, returning an x509 certificate.
func ParseCertificateBytes(content []byte) (*x509.Certificate, error) {
pemContent, _ := pem.Decode(content)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
crt, err := x509.ParseCertificate(pemContent.Bytes)
if err != nil {
return nil, errors.Wrap(err, "cannot parse x509 Certificate")
}
return crt, nil
}
// ParsePrivateKeyBytes parses the given private key bytes, returning an RSA private key.
func ParsePrivateKeyBytes(content []byte) (*rsa.PrivateKey, error) {
pemContent, _ := pem.Decode(content)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
privateKey, err := x509.ParsePKCS1PrivateKey(pemContent.Bytes)
if err != nil {
return nil, errors.Wrap(err, "cannot parse PKCS1 Private Key")
}
return privateKey, nil
}
// ParsePublicKeyBytes parses the given public key bytes, returning an RSA public key.
func ParsePublicKeyBytes(content []byte) (*rsa.PublicKey, error) {
pemContent, _ := pem.Decode(content)
if pemContent == nil {
return nil, fmt.Errorf("no right PEM block")
}
publicKey, err := x509.ParsePKIXPublicKey(pemContent.Bytes)
if err != nil {
return nil, err
}
rsaPublicKey, ok := publicKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("expected *rsa.PublicKey, got %T", rsaPublicKey)
}
return rsaPublicKey, nil
}
// IsValidCertificateKeyPairBytes checks if the certificate matches the private key bound to it.
func IsValidCertificateKeyPairBytes(certificateBytes []byte, privateKeyBytes []byte) (bool, error) {
crt, err := ParseCertificateBytes(certificateBytes)
if err != nil {
return false, err
}
key, err := ParsePrivateKeyBytes(privateKeyBytes)
if err != nil {
return false, err
}
switch {
case !checkCertificateValidity(*crt):
return false, nil
case !checkPublicKeys(*crt.PublicKey.(*rsa.PublicKey), key.PublicKey): //nolint:forcetypeassert
return false, nil
default:
return true, nil
}
}
func generateCertificateKeyPairBytes(template *x509.Certificate, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*bytes.Buffer, *bytes.Buffer, error) {
certPrivKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, errors.Wrap(err, "cannot generate an RSA key")
}
certBytes, err := x509.CreateCertificate(cryptorand.Reader, template, caCert, &certPrivKey.PublicKey, caKey)
if err != nil {
return nil, nil, errors.Wrap(err, "cannot create the certificate")
}
certPEM := &bytes.Buffer{}
if err = pem.Encode(certPEM, &pem.Block{
Type: "CERTIFICATE",
Headers: nil,
Bytes: certBytes,
}); err != nil {
return nil, nil, errors.Wrap(err, "cannot encode the generate certificate bytes")
}
certPrivKeyPEM := &bytes.Buffer{}
if err = pem.Encode(certPrivKeyPEM, &pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
}); err != nil {
return nil, nil, errors.Wrap(err, "cannot encode private key")
}
return certPEM, certPrivKeyPEM, nil
}
func checkCertificateValidity(cert x509.Certificate) bool {
@@ -133,13 +173,31 @@ func checkCertificateValidity(cert x509.Certificate) bool {
return now.Before(cert.NotAfter) && now.After(cert.NotBefore)
}
func checkCertificateKeyPair(cert x509.Certificate, privKey rsa.PrivateKey) bool {
return checkPublicKeys(*cert.PublicKey.(*rsa.PublicKey), privKey.PublicKey) //nolint:forcetypeassert
}
func checkPublicKeys(a rsa.PublicKey, b rsa.PublicKey) bool {
isN := a.N.Cmp(b.N) == 0
isE := a.E == b.E
return isN && isE
}
// NewCertificateTemplate returns the template that must be used to generate a certificate
// for authenticating against the DataStore.
func NewCertificateTemplate(commonName string) *x509.Certificate {
return &x509.Certificate{
PublicKeyAlgorithm: x509.RSA,
SerialNumber: big.NewInt(mathrand.Int63()),
Subject: pkix.Name{
CommonName: commonName,
Organization: []string{"system:masters"},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0),
SubjectKeyId: []byte{1, 2, 3, 4, 6},
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageCodeSigning,
},
KeyUsage: x509.KeyUsageDigitalSignature,
}
}
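
Taken together, the refactored helpers compose into a single issuance-and-validation flow. A minimal sketch, assuming PEM-encoded CA material on disk; the file names and the tenant common name are illustrative, since the operator normally reads this material from a Secret.

package main

import (
	"fmt"
	"os"

	"github.com/clastix/kamaji/internal/crypto"
)

func main() {
	caCrt, err := os.ReadFile("ca.crt")
	if err != nil {
		panic(err)
	}
	caKey, err := os.ReadFile("ca.key")
	if err != nil {
		panic(err)
	}

	// Build the client certificate template used to authenticate against the
	// DataStore, then sign it with the CA.
	template := crypto.NewCertificateTemplate("tenant-00")
	crtPEM, keyPEM, err := crypto.GenerateCertificatePrivateKeyPair(template, caCrt, caKey)
	if err != nil {
		panic(err)
	}

	// On later reconciliations the stored pair can be revalidated before reuse:
	// empty bytes short-circuit to false, otherwise expiry and key match are checked.
	valid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(crtPEM.Bytes(), keyPEM.Bytes())
	fmt.Println(valid, err)
}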


@@ -0,0 +1,50 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package errors
import "github.com/pkg/errors"
func NewCreateUserError(err error) error {
return errors.Wrap(err, "cannot create user")
}
func NewGrantPrivilegesError(err error) error {
return errors.Wrap(err, "cannot grant privileges")
}
func NewCheckUserExistsError(err error) error {
return errors.Wrap(err, "cannot check if user exists")
}
func NewCheckGrantExistsError(err error) error {
return errors.Wrap(err, "cannot check if grant exists")
}
func NewDeleteUserError(err error) error {
return errors.Wrap(err, "cannot delete user")
}
func NewCannotDeleteDatabaseError(err error) error {
return errors.Wrap(err, "cannot delete database")
}
func NewCheckDatabaseExistError(err error) error {
return errors.Wrap(err, "cannot check if database exists")
}
func NewRevokePrivilegesError(err error) error {
return errors.Wrap(err, "cannot revoke privileges")
}
func NewCloseConnectionError(err error) error {
return errors.Wrap(err, "cannot close connection")
}
func NewCheckConnectionError(err error) error {
return errors.Wrap(err, "cannot check connection")
}
func NewCreateDBError(err error) error {
return errors.Wrap(err, "cannot create database")
}
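
Each constructor wraps the underlying driver error with a stable message, so the datastore clients below surface uniform errors regardless of the backend. A minimal usage sketch:

package main

import (
	"fmt"

	"github.com/clastix/kamaji/internal/datastore/errors"
)

func main() {
	// Prints "cannot create user: connection refused"; the original error
	// remains reachable through Unwrap/Cause.
	err := errors.NewCreateUserError(fmt.Errorf("connection refused"))
	fmt.Println(err)
}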


@@ -7,12 +7,13 @@ import (
"context"
"fmt"
"github.com/pkg/errors"
goerrors "github.com/pkg/errors"
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
etcdclient "go.etcd.io/etcd/client/v3"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/datastore/errors"
)
const (
@@ -49,66 +50,64 @@ type EtcdClient struct {
}
func (e *EtcdClient) CreateUser(ctx context.Context, user, password string) error {
_, err := e.Client.Auth.UserAddWithOptions(ctx, user, password, &etcdclient.UserAddOptions{
NoPassword: true,
})
if _, err := e.Client.Auth.UserAddWithOptions(ctx, user, password, &etcdclient.UserAddOptions{NoPassword: true}); err != nil {
return errors.NewCreateUserError(err)
}
return err
return nil
}
func (e *EtcdClient) CreateDB(ctx context.Context, dbName string) error {
func (e *EtcdClient) CreateDB(context.Context, string) error {
return nil
}
func (e *EtcdClient) GrantPrivileges(ctx context.Context, user, dbName string) error {
_, err := e.Client.Auth.RoleAdd(ctx, dbName)
if err != nil {
return err
if _, err := e.Client.Auth.RoleAdd(ctx, dbName); err != nil {
return errors.NewGrantPrivilegesError(err)
}
permission := etcdclient.PermissionType(authpb.READWRITE)
key := e.buildKey(dbName)
if _, err = e.Client.RoleGrantPermission(ctx, user, key, rangeEnd, permission); err != nil {
return err
if _, err := e.Client.RoleGrantPermission(ctx, user, key, rangeEnd, permission); err != nil {
return errors.NewGrantPrivilegesError(err)
}
if _, err = e.Client.UserGrantRole(ctx, user, dbName); err != nil {
return err
if _, err := e.Client.UserGrantRole(ctx, user, dbName); err != nil {
return errors.NewGrantPrivilegesError(err)
}
return err
return nil
}
func (e *EtcdClient) UserExists(ctx context.Context, user string) (bool, error) {
_, err := e.Client.UserGet(ctx, user)
if err != nil {
if errors.As(err, &rpctypes.ErrGRPCUserNotFound) {
if _, err := e.Client.UserGet(ctx, user); err != nil {
if goerrors.As(err, &rpctypes.ErrGRPCUserNotFound) {
return false, nil
}
return false, err
return false, errors.NewCheckUserExistsError(err)
}
return true, nil
}
func (e *EtcdClient) DBExists(_ context.Context, dbName string) (bool, error) {
func (e *EtcdClient) DBExists(context.Context, string) (bool, error) {
return true, nil
}
func (e *EtcdClient) GrantPrivilegesExists(ctx context.Context, username, dbName string) (bool, error) {
_, err := e.Client.RoleGet(ctx, dbName)
if err != nil {
if errors.As(err, &rpctypes.ErrGRPCRoleNotFound) {
if goerrors.As(err, &rpctypes.ErrGRPCRoleNotFound) {
return false, nil
}
return false, err
return false, errors.NewCheckGrantExistsError(err)
}
user, err := e.Client.UserGet(ctx, username)
if err != nil {
return false, err
return false, errors.NewCheckGrantExistsError(err)
}
for _, i := range user.Roles {
@@ -121,23 +120,29 @@ func (e *EtcdClient) GrantPrivilegesExists(ctx context.Context, username, dbName
}
func (e *EtcdClient) DeleteUser(ctx context.Context, user string) error {
_, err := e.Client.Auth.UserDelete(ctx, user)
if _, err := e.Client.Auth.UserDelete(ctx, user); err != nil {
return errors.NewDeleteUserError(err)
}
return err
return nil
}
func (e *EtcdClient) DeleteDB(ctx context.Context, dbName string) error {
withRange := etcdclient.WithRange(rangeEnd)
prefix := e.buildKey(dbName)
_, err := e.Client.Delete(ctx, prefix, withRange)
if _, err := e.Client.Delete(ctx, prefix, withRange); err != nil {
return errors.NewCannotDeleteDatabaseError(err)
}
return err
return nil
}
func (e *EtcdClient) RevokePrivileges(ctx context.Context, user, dbName string) error {
_, err := e.Client.Auth.RoleDelete(ctx, dbName)
if _, err := e.Client.Auth.RoleDelete(ctx, dbName); err != nil {
return errors.NewRevokePrivilegesError(err)
}
return err
return nil
}
func (e *EtcdClient) GetConnectionString() string {
@@ -147,13 +152,19 @@ func (e *EtcdClient) GetConnectionString() string {
}
func (e *EtcdClient) Close() error {
return e.Client.Close()
if err := e.Client.Close(); err != nil {
return errors.NewCloseConnectionError(err)
}
return nil
}
func (e *EtcdClient) Check(ctx context.Context) error {
_, err := e.Client.AuthStatus(ctx)
if _, err := e.Client.AuthStatus(ctx); err != nil {
return errors.NewCheckConnectionError(err)
}
return err
return nil
}
func (e *EtcdClient) Driver() string {
@@ -163,9 +174,3 @@ func (e *EtcdClient) Driver() string {
func (e *EtcdClient) buildKey(roleName string) string {
return fmt.Sprintf("/%s/", roleName)
}
type Permission struct {
Type int `json:"type,omitempty"`
Key string `json:"key,omitempty"`
RangeEnd string `json:"rangeEnd,omitempty"`
}
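
As a sketch of how the client above is driven when provisioning a tenant, assumed to live in the same package as EtcdClient and to receive an already connected, auth-enabled client:

// provisionTenant creates the tenant user and scopes it to its own key prefix.
func provisionTenant(ctx context.Context, e *EtcdClient, tenant string) error {
	// The password is ignored: users are added with NoPassword and are expected
	// to authenticate through client certificates.
	if err := e.CreateUser(ctx, tenant, ""); err != nil {
		return err
	}
	// GrantPrivileges creates a role named after the tenant, grants READWRITE on
	// the /<tenant>/ prefix built by buildKey, then binds the role to the user.
	return e.GrantPrivileges(ctx, tenant, tenant)
}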


@@ -9,10 +9,10 @@ import (
"fmt"
"net/url"
"github.com/go-pg/pg/v10"
"github.com/go-sql-driver/mysql"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/datastore/errors"
)
const (
@@ -41,18 +41,6 @@ func (c *MySQLConnection) Driver() string {
return string(kamajiv1alpha1.KineMySQLDriver)
}
func NewPostgreSQLConnection(config ConnectionConfig) (Connection, error) {
opt := &pg.Options{
Addr: config.Endpoints[0].String(),
Database: config.DBName,
User: config.User,
Password: config.Password,
TLSConfig: config.TLSConfig,
}
return &PostgreSQLConnection{db: pg.Connect(opt), connection: config.Endpoints[0]}, nil
}
func NewMySQLConnection(config ConnectionConfig) (Connection, error) {
nameDB := fmt.Sprintf("%s(%s)", defaultProtocol, config.Endpoints[0].String())
@@ -91,23 +79,43 @@ func (c *MySQLConnection) GetConnectionString() string {
}
func (c *MySQLConnection) Close() error {
return c.db.Close()
if err := c.db.Close(); err != nil {
return errors.NewCloseConnectionError(err)
}
return nil
}
func (c *MySQLConnection) Check(ctx context.Context) error {
return c.db.PingContext(ctx)
if err := c.db.PingContext(ctx); err != nil {
return errors.NewCheckConnectionError(err)
}
return nil
}
func (c *MySQLConnection) CreateUser(ctx context.Context, user, password string) error {
return c.mutate(ctx, mysqlCreateUserStatement, user, password)
if err := c.mutate(ctx, mysqlCreateUserStatement, user, password); err != nil {
return errors.NewCreateUserError(err)
}
return nil
}
func (c *MySQLConnection) CreateDB(ctx context.Context, dbName string) error {
return c.mutate(ctx, mysqlCreateDBStatement, dbName)
if err := c.mutate(ctx, mysqlCreateDBStatement, dbName); err != nil {
return errors.NewCreateDBError(err)
}
return nil
}
func (c *MySQLConnection) GrantPrivileges(ctx context.Context, user, dbName string) error {
return c.mutate(ctx, mysqlGrantPrivilegesStatement, user, dbName)
if err := c.mutate(ctx, mysqlGrantPrivilegesStatement, user, dbName); err != nil {
return errors.NewGrantPrivilegesError(err)
}
return nil
}
func (c *MySQLConnection) UserExists(ctx context.Context, user string) (bool, error) {
@@ -124,7 +132,12 @@ func (c *MySQLConnection) UserExists(ctx context.Context, user string) (bool, er
return name == user, nil
}
return c.check(ctx, mysqlFetchUserStatement, checker, user)
ok, err := c.check(ctx, mysqlFetchUserStatement, checker, user)
if err != nil {
return false, errors.NewCheckUserExistsError(err)
}
return ok, nil
}
func (c *MySQLConnection) DBExists(ctx context.Context, dbName string) (bool, error) {
@@ -141,14 +154,19 @@ func (c *MySQLConnection) DBExists(ctx context.Context, dbName string) (bool, er
return name == dbName, nil
}
return c.check(ctx, mysqlFetchDBStatement, checker, dbName)
ok, err := c.check(ctx, mysqlFetchDBStatement, checker, dbName)
if err != nil {
return false, errors.NewCheckDatabaseExistError(err)
}
return ok, nil
}
func (c *MySQLConnection) GrantPrivilegesExists(ctx context.Context, user, dbName string) (bool, error) {
statementShowGrantsStatement := fmt.Sprintf(mysqlShowGrantsStatement, user)
rows, err := c.db.Query(statementShowGrantsStatement)
if err != nil {
return false, err
return false, errors.NewGrantPrivilegesError(err)
}
expected := fmt.Sprintf(mysqlGrantPrivilegesStatement, user, dbName)
@@ -156,7 +174,7 @@ func (c *MySQLConnection) GrantPrivilegesExists(ctx context.Context, user, dbNam
for rows.Next() {
if err = rows.Scan(&grant); err != nil {
return false, err
return false, errors.NewGrantPrivilegesError(err)
}
if grant == expected {
@@ -168,15 +186,27 @@ func (c *MySQLConnection) GrantPrivilegesExists(ctx context.Context, user, dbNam
}
func (c *MySQLConnection) DeleteUser(ctx context.Context, user string) error {
return c.mutate(ctx, mysqlDropUserStatement, user)
if err := c.mutate(ctx, mysqlDropUserStatement, user); err != nil {
return errors.NewDeleteUserError(err)
}
return nil
}
func (c *MySQLConnection) DeleteDB(ctx context.Context, dbName string) error {
return c.mutate(ctx, mysqlDropDBStatement, dbName)
if err := c.mutate(ctx, mysqlDropDBStatement, dbName); err != nil {
return errors.NewCannotDeleteDatabaseError(err)
}
return nil
}
func (c *MySQLConnection) RevokePrivileges(ctx context.Context, user, dbName string) error {
return c.mutate(ctx, mysqlRevokePrivilegesStatement, user, dbName)
if err := c.mutate(ctx, mysqlRevokePrivilegesStatement, user, dbName); err != nil {
return errors.NewRevokePrivilegesError(err)
}
return nil
}
func (c *MySQLConnection) check(ctx context.Context, nonFilledStatement string, checker func(*sql.Row) (bool, error), args ...any) (bool, error) {
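
The check helper whose signature closes this diff lets UserExists and DBExists share one lookup path, differing only in their statement and scan logic. A standalone sketch of the pattern; the package name, the SQL statement, and the QueryRowContext wiring are illustrative assumptions.

package sketch

import (
	"context"
	"database/sql"
	"errors"
)

// check runs a single-row lookup and delegates the existence decision to a
// checker closure, mirroring the private helper used by MySQLConnection above.
func check(ctx context.Context, db *sql.DB, statement string, checker func(*sql.Row) (bool, error), args ...any) (bool, error) {
	return checker(db.QueryRowContext(ctx, statement, args...))
}

// userExists shows the closure shape: sql.ErrNoRows means "absent", not failure.
func userExists(ctx context.Context, db *sql.DB, user string) (bool, error) {
	const fetchUser = `SELECT User FROM mysql.user WHERE User = ?` // illustrative statement
	return check(ctx, db, fetchUser, func(row *sql.Row) (bool, error) {
		var name string
		if err := row.Scan(&name); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return false, nil
			}
			return false, err
		}
		return name == user, nil
	}, user)
}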


@@ -11,6 +11,7 @@ import (
"github.com/go-pg/pg/v10"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/datastore/errors"
)
const (
@@ -30,6 +31,18 @@ type PostgreSQLConnection struct {
connection ConnectionEndpoint
}
func NewPostgreSQLConnection(config ConnectionConfig) (Connection, error) {
opt := &pg.Options{
Addr: config.Endpoints[0].String(),
Database: config.DBName,
User: config.User,
Password: config.Password,
TLSConfig: config.TLSConfig,
}
return &PostgreSQLConnection{db: pg.Connect(opt), connection: config.Endpoints[0]}, nil
}
func (r *PostgreSQLConnection) Driver() string {
return string(kamajiv1alpha1.KinePostgreSQLDriver)
}
@@ -37,7 +50,7 @@ func (r *PostgreSQLConnection) Driver() string {
func (r *PostgreSQLConnection) UserExists(ctx context.Context, user string) (bool, error) {
res, err := r.db.ExecContext(ctx, postgresqlUserExists, user)
if err != nil {
return false, err
return false, errors.NewCheckUserExistsError(err)
}
return res.RowsReturned() > 0, nil
@@ -46,7 +59,7 @@ func (r *PostgreSQLConnection) UserExists(ctx context.Context, user string) (boo
func (r *PostgreSQLConnection) CreateUser(ctx context.Context, user, password string) error {
_, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlCreateUserStatement, user), password)
if err != nil {
return err
return errors.NewCreateUserError(err)
}
return nil
@@ -55,7 +68,7 @@ func (r *PostgreSQLConnection) CreateUser(ctx context.Context, user, password st
func (r *PostgreSQLConnection) DBExists(ctx context.Context, dbName string) (bool, error) {
rows, err := r.db.ExecContext(ctx, postgresqlFetchDBStatement, dbName)
if err != nil {
return false, err
return false, errors.NewCheckDatabaseExistError(err)
}
return rows.RowsReturned() > 0, nil
@@ -64,7 +77,7 @@ func (r *PostgreSQLConnection) DBExists(ctx context.Context, dbName string) (boo
func (r *PostgreSQLConnection) CreateDB(ctx context.Context, dbName string) error {
_, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlCreateDBStatement, dbName))
if err != nil {
return err
return errors.NewCreateDBError(err)
}
return nil
@@ -79,35 +92,42 @@ func (r *PostgreSQLConnection) GrantPrivilegesExists(ctx context.Context, user,
return false, nil
}
return false, err
return false, errors.NewCheckGrantExistsError(err)
}
return hasDatabasePrivilege == "t", nil
}
func (r *PostgreSQLConnection) GrantPrivileges(ctx context.Context, user, dbName string) error {
res, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlGrantPrivilegesStatement, dbName, user))
_ = res
if _, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlGrantPrivilegesStatement, dbName, user)); err != nil {
return errors.NewGrantPrivilegesError(err)
}
return err
return nil
}
func (r *PostgreSQLConnection) DeleteUser(ctx context.Context, user string) error {
_, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlDropRoleStatement, user))
if _, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlDropRoleStatement, user)); err != nil {
return errors.NewDeleteUserError(err)
}
return err
return nil
}
func (r *PostgreSQLConnection) DeleteDB(ctx context.Context, dbName string) error {
_, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlDropDBStatement, dbName))
if _, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlDropDBStatement, dbName)); err != nil {
return errors.NewCannotDeleteDatabaseError(err)
}
return err
return nil
}
func (r *PostgreSQLConnection) RevokePrivileges(ctx context.Context, user, dbName string) error {
_, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlRevokePrivilegesStatement, dbName, user))
if _, err := r.db.ExecContext(ctx, fmt.Sprintf(postgresqlRevokePrivilegesStatement, dbName, user)); err != nil {
return errors.NewRevokePrivilegesError(err)
}
return err
return nil
}
func (r *PostgreSQLConnection) GetConnectionString() string {
@@ -115,9 +135,17 @@ func (r *PostgreSQLConnection) GetConnectionString() string {
}
func (r *PostgreSQLConnection) Close() error {
return r.db.Close()
if err := r.db.Close(); err != nil {
return errors.NewCloseConnectionError(err)
}
return nil
}
func (r *PostgreSQLConnection) Check(ctx context.Context) error {
return r.db.Ping(ctx)
if err := r.db.Ping(ctx); err != nil {
return errors.NewCheckConnectionError(err)
}
return nil
}
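
A sketch of opening and probing the datastore with the relocated constructor, written as a same-package fragment; the Host and Port fields on ConnectionEndpoint are assumptions, since this diff only shows that the endpoint renders through String().

func openDatastore(ctx context.Context) (Connection, error) {
	conn, err := NewPostgreSQLConnection(ConnectionConfig{
		Endpoints: []ConnectionEndpoint{{Host: "postgres-default-rw", Port: 5432}}, // illustrative endpoint
		DBName:    "kamaji",
		User:      "kamaji",
		Password:  "secret",
	})
	if err != nil {
		return nil, err
	}
	// Ping before use: failures come back wrapped as "cannot check connection".
	if err := conn.Check(ctx); err != nil {
		_ = conn.Close()
		return nil, err
	}
	return conn, nil
}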


@@ -1,47 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package etcd
import (
"bytes"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"math/rand"
"time"
"github.com/clastix/kamaji/internal/crypto"
)
func GetETCDCACertificateAndKeyPair(tenant string, caCert []byte, caPrivKey []byte) (*bytes.Buffer, *bytes.Buffer, error) {
template := getCertTemplate(tenant)
return crypto.GetCertificateAndKeyPair(template, caCert, caPrivKey)
}
func IsETCDCertificateAndKeyPairValid(cert []byte, privKey []byte) (bool, error) {
return crypto.IsValidCertificateKeyPairBytes(cert, privKey)
}
func getCertTemplate(tenant string) *x509.Certificate {
serialNumber := big.NewInt(rand.Int63())
return &x509.Certificate{
PublicKeyAlgorithm: x509.RSA,
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: tenant,
Organization: []string{certOrganization},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(certExpirationDelayYears, 0, 0),
SubjectKeyId: []byte{1, 2, 3, 4, 6},
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageCodeSigning,
},
KeyUsage: x509.KeyUsageDigitalSignature,
}
}


@@ -1,9 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package etcd
const (
certExpirationDelayYears = 10
certOrganization = "system:masters"
)


@@ -5,25 +5,13 @@ package kubeadm
import (
"context"
"fmt"
"time"
"io"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api/v1"
"k8s.io/component-base/config/v1alpha1"
kubeproxyconfig "k8s.io/kube-proxy/config/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/utils/pointer"
"github.com/clastix/kamaji/internal/utilities"
)
const (
@@ -34,7 +22,14 @@ const (
)
func AddCoreDNS(client kubernetes.Interface, config *Configuration) error {
return dns.EnsureDNSAddon(&config.InitConfiguration.ClusterConfiguration, client)
// We're passing the values from the parameters here because they wouldn't be hashed by the YAML encoder:
// the kubeadm.ClusterConfiguration struct has no struct tags, so it wouldn't be hashed properly.
if opts := config.Parameters.CoreDNSOptions; opts != nil {
config.InitConfiguration.DNS.ImageRepository = opts.Repository
config.InitConfiguration.DNS.ImageTag = opts.Tag
}
return dns.EnsureDNSAddon(&config.InitConfiguration.ClusterConfiguration, client, io.Discard, false)
}
func RemoveCoreDNSAddon(ctx context.Context, client kubernetes.Interface) error {
@@ -104,24 +99,15 @@ func getCoreDNSConfigMapName(ctx context.Context) (string, error) {
return coreDNSName, nil
}
func AddKubeProxy(client kubernetes.Interface, config *Configuration) error {
if err := proxy.CreateServiceAccount(client); err != nil {
return errors.Wrap(err, "error when creating kube-proxy service account")
}
func AddKubeProxy(client kubernetes.Interface, config *Configuration) (err error) {
// This is a workaround since the function EnsureProxyAddon is picking repository and tag from the InitConfiguration
// struct, although this is counterintuitive
config.InitConfiguration.ClusterConfiguration.CIImageRepository = config.Parameters.KubeProxyOptions.Repository
config.InitConfiguration.KubernetesVersion = config.Parameters.KubeProxyOptions.Tag
if err := createKubeProxyConfigMap(client, config); err != nil {
return err
}
err = proxy.EnsureProxyAddon(&config.InitConfiguration.ClusterConfiguration, &config.InitConfiguration.LocalAPIEndpoint, client, io.Discard, false)
if err := createKubeProxyAddon(client, config.Parameters.KubeProxyImage); err != nil {
return err
}
if err := proxy.CreateRBACRules(client); err != nil {
return errors.Wrap(err, "error when creating kube-proxy RBAC rules")
}
return nil
return
}
func RemoveKubeProxy(ctx context.Context, client kubernetes.Interface) error {
@@ -215,247 +201,3 @@ func getKubeProxyConfigMapName(ctx context.Context) (string, error) {
// Implement a method for future approaches
return kubeProxyName, nil
}
func createKubeProxyConfigMap(client kubernetes.Interface, config *Configuration) error {
configConf, err := getKubeproxyConfigmapContent(config)
if err != nil {
return err
}
kubeconfigConf, err := getKubeproxyKubeconfigContent(config)
if err != nil {
return err
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeProxyConfigMap,
Namespace: "kube-system",
Labels: map[string]string{
"app": "kube-proxy",
},
},
Data: map[string]string{
kubeadmconstants.KubeProxyConfigMapKey: string(configConf),
"kubeconfig.conf": string(kubeconfigConf),
},
}
return apiclient.CreateOrUpdateConfigMap(client, configMap)
}
func createKubeProxyAddon(client kubernetes.Interface, image string) error {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "kube-proxy",
},
},
Spec: appsv1.DaemonSetSpec{
RevisionHistoryLimit: pointer.Int32(10),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kube-proxy",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kube-proxy",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Command: []string{
"/usr/local/bin/kube-proxy",
"--config=/var/lib/kube-proxy/config.conf",
"--hostname-override=$(NODE_NAME)",
},
Env: []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
},
Image: image,
ImagePullPolicy: corev1.PullIfNotPresent,
Name: "kube-proxy",
SecurityContext: &corev1.SecurityContext{
Privileged: pointer.Bool(true),
},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: "File",
VolumeMounts: []corev1.VolumeMount{
{
MountPath: "/var/lib/kube-proxy",
Name: "kube-proxy",
},
{
MountPath: "/run/xtables.lock",
Name: "xtables-lock",
},
{
MountPath: "/lib/modules",
Name: "lib-modules",
ReadOnly: true,
},
},
},
},
DNSPolicy: corev1.DNSClusterFirst,
HostNetwork: true,
NodeSelector: map[string]string{
"kubernetes.io/os": "linux",
},
Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists},
},
PriorityClassName: "system-node-critical",
RestartPolicy: corev1.RestartPolicyAlways,
SchedulerName: "default-scheduler",
ServiceAccountName: "kube-proxy",
TerminationGracePeriodSeconds: pointer.Int64(30),
Volumes: []corev1.Volume{
{
Name: "kube-proxy",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
DefaultMode: pointer.Int32(420),
LocalObjectReference: corev1.LocalObjectReference{
Name: "kube-proxy",
},
},
},
},
{
Name: "xtables-lock",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/run/xtables.lock",
Type: (*corev1.HostPathType)(pointer.String(string(corev1.HostPathFileOrCreate))),
},
},
},
{
Name: "lib-modules",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/lib/modules",
Type: (*corev1.HostPathType)(pointer.String(string(corev1.HostPathUnset))),
},
},
},
},
},
},
},
}
return apiclient.CreateOrUpdateDaemonSet(client, daemonSet)
}
func getKubeproxyConfigmapContent(config *Configuration) ([]byte, error) {
zeroDuration := metav1.Duration{Duration: 0}
oneSecondDuration := metav1.Duration{Duration: time.Second}
kubeProxyConfiguration := kubeproxyconfig.KubeProxyConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeProxyConfiguration",
APIVersion: "kubeproxy.config.k8s.io/v1alpha1",
},
BindAddress: "0.0.0.0",
BindAddressHardFail: false,
ClientConnection: v1alpha1.ClientConnectionConfiguration{
AcceptContentTypes: "",
Burst: 0,
ContentType: "",
Kubeconfig: "/var/lib/kube-proxy/kubeconfig.conf",
QPS: 0,
},
ClusterCIDR: config.Parameters.TenantControlPlanePodCIDR,
ConfigSyncPeriod: zeroDuration,
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
MaxPerCore: pointer.Int32(0),
Min: nil,
TCPCloseWaitTimeout: nil,
TCPEstablishedTimeout: nil,
},
DetectLocalMode: "",
EnableProfiling: false,
HealthzBindAddress: "",
HostnameOverride: "",
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: false,
MasqueradeBit: nil,
MinSyncPeriod: oneSecondDuration,
SyncPeriod: zeroDuration,
},
IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
ExcludeCIDRs: nil,
MinSyncPeriod: zeroDuration,
Scheduler: "",
StrictARP: false,
SyncPeriod: zeroDuration,
TCPTimeout: zeroDuration,
TCPFinTimeout: zeroDuration,
UDPTimeout: zeroDuration,
},
MetricsBindAddress: "",
Mode: "iptables",
NodePortAddresses: nil,
OOMScoreAdj: nil,
PortRange: "",
ShowHiddenMetricsForVersion: "",
UDPIdleTimeout: zeroDuration,
Winkernel: kubeproxyconfig.KubeProxyWinkernelConfiguration{
EnableDSR: false,
NetworkName: "",
SourceVip: "",
},
}
return utilities.EncondeToYaml(&kubeProxyConfiguration)
}
func getKubeproxyKubeconfigContent(config *Configuration) ([]byte, error) {
kubeconfig := clientcmdapi.Config{
APIVersion: "v1",
Kind: "Config",
Clusters: []clientcmdapi.NamedCluster{
{
Name: "default",
Cluster: clientcmdapi.Cluster{
CertificateAuthority: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
Server: fmt.Sprintf("https://%s:%d", config.Parameters.TenantControlPlaneAddress, config.Parameters.TenantControlPlanePort),
},
},
},
Contexts: []clientcmdapi.NamedContext{
{
Context: clientcmdapi.Context{
Cluster: "default",
Namespace: "default",
AuthInfo: "default",
},
},
},
AuthInfos: []clientcmdapi.NamedAuthInfo{
{
Name: "default",
AuthInfo: clientcmdapi.AuthInfo{
TokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
},
},
},
}
return utilities.EncondeToYaml(&kubeconfig)
}
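
Downstream, a caller can override the CoreDNS image before ensuring the addon. A fragment reusing only what is visible above; the concrete type behind CoreDNSOptions is not shown in this diff, so only its Repository and Tag fields are touched, and the image values are illustrative.

if opts := config.Parameters.CoreDNSOptions; opts != nil {
	opts.Repository = "registry.k8s.io/coredns"
	opts.Tag = "v1.9.3"
}
if err := AddCoreDNS(client, config); err != nil {
	return errors.Wrap(err, "cannot ensure the CoreDNS addon")
}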


@@ -44,8 +44,8 @@ func GenerateCACertificatePrivateKeyPair(baseName string, config *Configuration)
func GenerateCertificatePrivateKeyPair(baseName string, config *Configuration, ca CertificatePrivateKeyPair) (*CertificatePrivateKeyPair, error) {
defer deleteCertificateDirectory(config.InitConfiguration.CertificatesDir)
certificate, _ := cryptoKamaji.GetCertificate(ca.Certificate)
signer, _ := cryptoKamaji.GetPrivateKey(ca.PrivateKey)
certificate, _ := cryptoKamaji.ParseCertificateBytes(ca.Certificate)
signer, _ := cryptoKamaji.ParsePrivateKeyBytes(ca.PrivateKey)
kubeadmCert, err := getKubeadmCert(baseName)
if err != nil {
@@ -106,28 +106,6 @@ func GeneratePublicKeyPrivateKeyPair(baseName string, config *Configuration) (*P
return publicKeyPrivateKeyPair, err
}
func IsCertificatePrivateKeyPairValid(certificate []byte, privKey []byte) (bool, error) {
if len(certificate) == 0 {
return false, nil
}
if len(privKey) == 0 {
return false, nil
}
return cryptoKamaji.IsValidCertificateKeyPairBytes(certificate, privKey)
}
func IsPublicKeyPrivateKeyPairValid(pubKey []byte, privKey []byte) (bool, error) {
if len(pubKey) == 0 {
return false, nil
}
if len(privKey) == 0 {
return false, nil
}
return cryptoKamaji.IsValidKeyPairBytes(pubKey, privKey)
}
func initPhaseCertsSA(config *Configuration) error {
return certs.CreateServiceAccountKeyAndPublicKeyFiles(config.InitConfiguration.CertificatesDir, config.InitConfiguration.PublicKeyAlgorithm())
}


@@ -4,15 +4,14 @@
package kubeadm
import (
"encoding/json"
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"github.com/clastix/kamaji/internal/utilities"
)
const (
@@ -21,128 +20,103 @@ const (
defaultKeyFile = "/etc/kubernetes/pki/apiserver-etcd-client.key"
)
func CreateKubeadmInitConfiguration(params Parameters) Configuration {
config := kubeadmapi.InitConfiguration{
ClusterConfiguration: getKubeadmClusterConfiguration(params),
BootstrapTokens: []bootstraptokenv1.BootstrapToken{
{
Groups: []string{"system:bootstrappers:kubeadm:default-node-token"},
TTL: &metav1.Duration{Duration: 48 * time.Hour},
Usages: []string{
"signing",
"authentication",
},
},
},
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: params.TenantControlPlaneAddress,
BindPort: params.TenantControlPlanePort,
},
NodeRegistration: kubeadmapi.NodeRegistrationOptions{
CRISocket: "unix:///run/containerd/containerd.sock",
Name: params.TenantControlPlaneName,
},
func CreateKubeadmInitConfiguration(params Parameters) (*Configuration, error) {
defaultConf, err := config.DefaultedStaticInitConfiguration()
if err != nil {
return nil, err
}
return Configuration{InitConfiguration: config}
}
conf := defaultConf
// Due to an unmarshaling error occurring when the GetKubeadmInitConfigurationFromMap function is invoked,
// we have to set ComponentConfigs to a nil value.
conf.ClusterConfiguration.ComponentConfigs = nil
func isHTTPS(url string) bool {
return strings.HasPrefix(url, "https")
}
conf.LocalAPIEndpoint = kubeadmapi.APIEndpoint{
AdvertiseAddress: params.TenantControlPlaneAddress,
BindPort: params.TenantControlPlanePort,
}
conf.NodeRegistration.Name = params.TenantControlPlaneName
func getKubeadmClusterConfiguration(params Parameters) kubeadmapi.ClusterConfiguration {
caFile, certFile, keyFile := "", "", ""
if isHTTPS(params.ETCDs[0]) {
if strings.HasPrefix(params.ETCDs[0], "https") {
caFile, certFile, keyFile = defaultCAFile, defaultCertFile, defaultKeyFile
}
return kubeadmapi.ClusterConfiguration{
KubernetesVersion: params.TenantControlPlaneVersion,
ClusterName: params.TenantControlPlaneName,
CertificatesDir: "/etc/kubernetes/pki",
ImageRepository: "k8s.gcr.io",
Networking: kubeadmapi.Networking{
DNSDomain: "cluster.local",
PodSubnet: params.TenantControlPlanePodCIDR,
ServiceSubnet: params.TenantControlPlaneServiceCIDR,
},
DNS: kubeadmapi.DNS{
Type: "CoreDNS",
},
ControlPlaneEndpoint: params.TenantControlPlaneEndpoint,
Etcd: kubeadmapi.Etcd{
External: &kubeadmapi.ExternalEtcd{
Endpoints: params.ETCDs,
CAFile: caFile,
CertFile: certFile,
KeyFile: keyFile,
},
},
APIServer: kubeadmapi.APIServer{
CertSANs: append([]string{
"127.0.0.1",
"localhost",
params.TenantControlPlaneName,
fmt.Sprintf("%s.%s.svc", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
fmt.Sprintf("%s.%s.svc.cluster.local", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
params.TenantControlPlaneAddress,
}, params.TenantControlPlaneCertSANs...),
ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{
"etcd-compaction-interval": "0s",
"etcd-prefix": fmt.Sprintf("/%s", params.TenantControlPlaneName),
},
},
conf.Etcd = kubeadmapi.Etcd{
External: &kubeadmapi.ExternalEtcd{
Endpoints: params.ETCDs,
CAFile: caFile,
CertFile: certFile,
KeyFile: keyFile,
},
}
conf.Networking = kubeadmapi.Networking{
DNSDomain: "cluster.local",
PodSubnet: params.TenantControlPlanePodCIDR,
ServiceSubnet: params.TenantControlPlaneServiceCIDR,
}
conf.KubernetesVersion = params.TenantControlPlaneVersion
conf.ControlPlaneEndpoint = params.TenantControlPlaneEndpoint
conf.APIServer.CertSANs = append([]string{
"127.0.0.1",
"localhost",
params.TenantControlPlaneName,
fmt.Sprintf("%s.%s.svc", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
fmt.Sprintf("%s.%s.svc.cluster.local", params.TenantControlPlaneName, params.TenantControlPlaneNamespace),
params.TenantControlPlaneAddress,
}, params.TenantControlPlaneCertSANs...)
conf.APIServer.ControlPlaneComponent.ExtraArgs = map[string]string{
"etcd-compaction-interval": "0s",
"etcd-prefix": fmt.Sprintf("/%s", params.TenantControlPlaneName),
}
conf.ClusterName = params.TenantControlPlaneName
return &Configuration{InitConfiguration: *conf}, nil
}
func GetKubeadmInitConfigurationMap(config Configuration) (map[string]string, error) {
initConfigurationString, err := getJSONStringFromStruct(config.InitConfiguration)
initConfigurationString, err := utilities.EncodeToJSON(&config.InitConfiguration)
if err != nil {
return map[string]string{}, err
return nil, err
}
clusterConfigurationString, err := getJSONStringFromStruct(config.InitConfiguration.ClusterConfiguration)
clusterConfigurationString, err := utilities.EncodeToJSON(&config.InitConfiguration.ClusterConfiguration)
if err != nil {
return map[string]string{}, err
return nil, err
}
return map[string]string{
kubeadmconstants.InitConfigurationKind: initConfigurationString,
kubeadmconstants.ClusterConfigurationKind: clusterConfigurationString,
kubeadmconstants.InitConfigurationKind: string(initConfigurationString),
kubeadmconstants.ClusterConfigurationKind: string(clusterConfigurationString),
}, nil
}
func GetKubeadmInitConfigurationFromMap(config map[string]string) (*Configuration, error) {
initConfigurationString, ok := config[kubeadmconstants.InitConfigurationKind]
func GetKubeadmInitConfigurationFromMap(conf map[string]string) (*Configuration, error) {
initConfigurationString, ok := conf[kubeadmconstants.InitConfigurationKind]
if !ok {
return nil, fmt.Errorf("%s is not in the map", kubeadmconstants.InitConfigurationKind)
}
clusterConfigurationString, ok := config[kubeadmconstants.ClusterConfigurationKind]
clusterConfigurationString, ok := conf[kubeadmconstants.ClusterConfigurationKind]
if !ok {
return nil, fmt.Errorf("%s is not in the map", kubeadmconstants.ClusterConfigurationKind)
}
initConfiguration := kubeadmapi.InitConfiguration{}
if err := json.Unmarshal([]byte(initConfigurationString), &initConfiguration); err != nil {
if err := utilities.DecodeFromJSON(initConfigurationString, &initConfiguration); err != nil {
return nil, err
}
if err := json.Unmarshal([]byte(clusterConfigurationString), &initConfiguration.ClusterConfiguration); err != nil {
if err := utilities.DecodeFromJSON(clusterConfigurationString, &initConfiguration.ClusterConfiguration); err != nil {
return nil, err
}
// Due to some weird issues with unmarshaling of the ComponentConfigs struct,
// we have to extract the default value and assign it directly.
defaults, err := config.DefaultedStaticInitConfiguration()
if err != nil {
return nil, err
}
initConfiguration.ClusterConfiguration.ComponentConfigs = defaults.ComponentConfigs
return &Configuration{InitConfiguration: initConfiguration}, nil
}
func getJSONStringFromStruct(i interface{}) (string, error) {
b, err := json.Marshal(i)
if err != nil {
return "", err
}
return string(b), nil
}
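// Note: the utilities.EncodeToJSON and utilities.DecodeFromJSON helpers that replace
// getJSONStringFromStruct are not shown in this diff; a plausible minimal shape, assuming
// plain encoding/json, is:
package utilities

import "encoding/json"

// EncodeToJSON marshals any value into its JSON byte representation.
func EncodeToJSON(i interface{}) ([]byte, error) {
	return json.Marshal(i)
}

// DecodeFromJSON unmarshals a JSON string into the given target.
func DecodeFromJSON(s string, target interface{}) error {
	return json.Unmarshal([]byte(s), target)
}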

View File

@@ -0,0 +1,38 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package printers
import (
"io"
"k8s.io/apimachinery/pkg/runtime"
)
type Discard struct{}
func (d Discard) PrintObj(obj runtime.Object, writer io.Writer) error {
return nil
}
func (d Discard) Fprintf(writer io.Writer, format string, args ...interface{}) (n int, err error) {
return
}
func (d Discard) Fprintln(writer io.Writer, args ...interface{}) (n int, err error) {
return
}
func (d Discard) Printf(format string, args ...interface{}) (n int, err error) {
return
}
func (d Discard) Println(args ...interface{}) (n int, err error) {
return
}
func (d Discard) Flush(writer io.Writer, last bool) {
}
func (d Discard) Close(writer io.Writer) {
}
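// Note: the no-op methods above match the shape of kubeadm's output.Printer interface, which
// is presumably the intended consumer, so the kubeadm phases can run silently. A compile-time
// assertion makes that explicit:
package printers

import "k8s.io/kubernetes/cmd/kubeadm/app/util/output"

// Fails to compile if Discard drifts from the assumed interface.
var _ output.Printer = Discard{}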

View File

@@ -5,30 +5,30 @@ package kubeadm
import (
json "github.com/json-iterator/go"
clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeconfigutil "github.com/clastix/kamaji/internal/kubeconfig"
"github.com/clastix/kamaji/internal/utilities"
)
type Configuration struct {
InitConfiguration kubeadmapi.InitConfiguration
Kubeconfig kubeconfigutil.Kubeconfig
Kubeconfig clientcmdapiv1.Config
Parameters Parameters
}
func (c *Configuration) Checksum() string {
initConfiguration, _ := utilities.EncondeToYaml(&c.InitConfiguration)
initConfiguration, _ := utilities.EncodeToYaml(&c.InitConfiguration)
kubeconfig, _ := json.Marshal(c.Kubeconfig)
parameters, _ := json.Marshal(c.Parameters)
data := map[string]string{
"InitConfiguration": string(initConfiguration),
"Kubeconfig": string(kubeconfig),
"Parameters": string(parameters),
data := map[string][]byte{
"InitConfiguration": initConfiguration,
"Kubeconfig": kubeconfig,
"Parameters": parameters,
}
return utilities.CalculateConfigMapChecksum(data)
return utilities.CalculateMapChecksum(data)
}
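// Note: utilities.CalculateMapChecksum is not shown in this diff. Given the sorted-key MD5
// pattern used elsewhere in this changeset (see SecretHashValue below), a plausible sketch is:
package utilities

import (
	"crypto/md5"
	"fmt"
	"sort"
)

func CalculateMapChecksum(data map[string][]byte) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys) // map iteration order is random; sorting keeps the digest stable

	h := md5.New()
	for _, k := range keys {
		h.Write(data[k])
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}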
type Parameters struct {
@@ -46,7 +46,13 @@ type Parameters struct {
ETCDs []string
CertificatesDir string
KubeconfigDir string
KubeProxyImage string
KubeProxyOptions *AddonOptions
CoreDNSOptions *AddonOptions
}
type AddonOptions struct {
Repository string
Tag string
}
type KubeletConfiguration struct {

View File

@@ -4,13 +4,10 @@
package kubeadm
import (
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
kubelettypes "k8s.io/kubelet/config/v1beta1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -37,10 +34,7 @@ func UploadKubeletConfig(client kubernetes.Interface, config *Configuration) err
return err
}
configMapName, err := configMapName(config.Parameters.TenantControlPlaneVersion)
if err != nil {
return err
}
configMapName := kubeadmconstants.KubeletBaseConfigurationConfigMap
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -56,7 +50,7 @@ func UploadKubeletConfig(client kubernetes.Interface, config *Configuration) err
return err
}
if err := createConfigMapRBACRules(client, config.Parameters.TenantControlPlaneVersion); err != nil {
if err := createConfigMapRBACRules(client); err != nil {
return errors.Wrap(err, "error creating kubelet configuration configmap RBAC rules")
}
@@ -117,19 +111,11 @@ func getKubeletConfigmapContent(kubeletConfiguration KubeletConfiguration) ([]by
VolumeStatsAggPeriod: zeroDuration,
}
return utilities.EncondeToYaml(&kc)
return utilities.EncodeToYaml(&kc)
}
func createConfigMapRBACRules(client kubernetes.Interface, kubernetesVersion string) error {
configMapName, err := configMapName(kubernetesVersion)
if err != nil {
return err
}
configMapRBACName, err := configMapRBACName(kubernetesVersion)
if err != nil {
return err
}
func createConfigMapRBACRules(client kubernetes.Interface) error {
configMapRBACName := kubeadmconstants.KubeletBaseConfigMapRole
if err := apiclient.CreateOrUpdateRole(client, &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
@@ -141,7 +127,7 @@ func createConfigMapRBACRules(client kubernetes.Interface, kubernetesVersion str
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"configmaps"},
ResourceNames: []string{configMapName},
ResourceNames: []string{kubeadmconstants.KubeletBaseConfigurationConfigMap},
},
},
}); err != nil {
@@ -170,21 +156,3 @@ func createConfigMapRBACRules(client kubernetes.Interface, kubernetesVersion str
},
})
}
func configMapName(kubernetesVersion string) (string, error) {
version, err := k8sversion.ParseSemantic(kubernetesVersion)
if err != nil {
return "", err
}
return kubeadmconstants.GetKubeletConfigMapName(version, true), nil
}
func configMapRBACName(kubernetesVersion string) (string, error) {
version, err := k8sversion.ParseSemantic(kubernetesVersion)
if err != nil {
return "", err
}
return fmt.Sprintf("%s%d.%d", kubeadmconstants.KubeletBaseConfigMapRolePrefix, version.Major(), version.Minor()), nil
}
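// Note: the versioned helpers removed above became dead code because recent kubeadm releases
// dropped the per-version kubelet ConfigMap naming; assuming a v1.24+ kubeadm dependency, the
// constants now used directly resolve to fixed, version-independent names:
//
//	kubeadmconstants.KubeletBaseConfigurationConfigMap // "kubelet-config"
//	kubeadmconstants.KubeletBaseConfigMapRole          // "kubeadm:kubelet-config"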

View File

@@ -1,28 +0,0 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package kubeconfig
import (
"bytes"
"k8s.io/apimachinery/pkg/util/yaml"
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
)
type Kubeconfig v1.Config
func GetKubeconfigFromBytesBuffer(buffer *bytes.Buffer) (*Kubeconfig, error) {
kubeconfig := &Kubeconfig{}
if err := yaml.NewYAMLOrJSONDecoder(buffer, buffer.Len()).Decode(kubeconfig); err != nil {
return nil, err
}
return kubeconfig, nil
}
func GetKubeconfigFromBytes(b []byte) (*Kubeconfig, error) {
buffer := bytes.NewBuffer(b)
return GetKubeconfigFromBytesBuffer(buffer)
}
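// Note: with the bespoke Kubeconfig type deleted, callers decode straight into the client-go
// type (see the clientcmdapiv1.Config field earlier in this diff); a sketch assuming
// sigs.k8s.io/yaml:
package main

import (
	"fmt"

	clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	data := []byte("apiVersion: v1\nkind: Config\n")
	var cfg clientcmdapiv1.Config
	if err := yaml.Unmarshal(data, &cfg); err != nil { // YAML is converted to JSON, then unmarshaled
		panic(err)
	}
	fmt.Println(cfg.Kind) // Config
}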

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
@@ -15,8 +14,11 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -24,12 +26,11 @@ import (
type APIServerCertificate struct {
resource *corev1.Secret
Client client.Client
Log logr.Logger
TmpDirectory string
}
func (r *APIServerCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServer.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Certificates.APIServer.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *APIServerCertificate) ShouldCleanup(_ *kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,20 +75,22 @@ func (r *APIServerCertificate) GetName() string {
func (r *APIServerCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServer.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServer.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServer.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.APIServer.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.APIServer.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.APIServerCertName],
r.resource.Data[kubeadmconstants.APIServerKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerCertAndKeyBaseName, err.Error()))
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerCertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -96,12 +99,16 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
return err
}
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
@@ -112,6 +119,8 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
}
certificateKeyPair, err := kubeadm.GenerateCertificatePrivateKeyPair(kubeadmconstants.APIServerCertAndKeyBaseName, config, ca)
if err != nil {
logger.Error(err, "cannot generate certificate and private key")
return err
}
@@ -124,7 +133,7 @@ func (r *APIServerCertificate) mutate(ctx context.Context, tenantControlPlane *k
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
r.resource.SetLabels(utilities.MergeMaps(

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
@@ -15,8 +14,11 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -24,12 +26,11 @@ import (
type APIServerKubeletClientCertificate struct {
resource *corev1.Secret
Client client.Client
Log logr.Logger
TmpDirectory string
}
func (r *APIServerKubeletClientCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *APIServerKubeletClientCertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,20 +75,22 @@ func (r *APIServerKubeletClientCertificate) GetName() string {
func (r *APIServerKubeletClientCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.APIServerKubeletClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
if checksum := tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.APIServerKubeletClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.APIServerKubeletClientCertName],
r.resource.Data[kubeadmconstants.APIServerKubeletClientKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, err.Error()))
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -96,6 +99,8 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
return err
}
@@ -103,6 +108,8 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
@@ -113,6 +120,8 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
}
certificateKeyPair, err := kubeadm.GenerateCertificatePrivateKeyPair(kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, config, ca)
if err != nil {
logger.Error(err, "cannot generate certificate and private key")
return err
}
@@ -133,7 +142,7 @@ func (r *APIServerKubeletClientCertificate) mutate(ctx context.Context, tenantCo
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())

View File

@@ -7,15 +7,17 @@ import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -23,13 +25,12 @@ import (
type CACertificate struct {
resource *corev1.Secret
Client client.Client
Log logr.Logger
TmpDirectory string
}
func (r *CACertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.CA.SecretName != r.resource.GetName() ||
tenantControlPlane.Status.Certificates.CA.Checksum != r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.CA.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *CACertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,20 +75,22 @@ func (r *CACertificate) GetName() string {
func (r *CACertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.CA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.CA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.CA.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.CA.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.CACertName],
r.resource.Data[kubeadmconstants.CAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.CACertAndKeyBaseName, err.Error()))
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.CACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -96,11 +99,15 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
return err
}
ca, err := kubeadm.GenerateCACertificatePrivateKeyPair(kubeadmconstants.CACertAndKeyBaseName, config)
if err != nil {
logger.Error(err, "cannot generate certificate and private key")
return err
}
@@ -121,7 +128,7 @@ func (r *CACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())

View File

@@ -13,9 +13,11 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/etcd"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -27,7 +29,7 @@ type Certificate struct {
}
func (r *Certificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Storage.Certificate.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Storage.Certificate.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *Certificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -68,7 +70,7 @@ func (r *Certificate) GetName() string {
func (r *Certificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Storage.Certificate.SecretName = r.resource.GetName()
tenantControlPlane.Status.Storage.Certificate.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Storage.Certificate.Checksum = r.resource.GetAnnotations()[constants.Checksum]
tenantControlPlane.Status.Storage.Certificate.LastUpdate = metav1.Now()
return nil
@@ -76,16 +78,20 @@ func (r *Certificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantCo
func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
ca, err := r.DataStore.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "cannot retrieve CA certificate content")
return err
}
r.resource.Data["ca.crt"] = ca
if r.resource.GetAnnotations()["checksum"] == utilities.CalculateConfigMapChecksum(r.resource.StringData) {
if r.resource.GetAnnotations()[constants.Checksum] == utilities.CalculateMapChecksum(r.resource.Data) {
if r.DataStore.Spec.Driver == kamajiv1alpha1.EtcdDriver {
if isValid, _ := etcd.IsETCDCertificateAndKeyPairValid(r.resource.Data["server.crt"], r.resource.Data["server.key"]); isValid {
if isValid, _ := crypto.IsValidCertificateKeyPairBytes(r.resource.Data["server.crt"], r.resource.Data["server.key"]); isValid {
return nil
}
}
@@ -99,11 +105,15 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
// certificate used for authentication is mandatory, along with the CA private key.
privateKey, err := r.DataStore.Spec.TLSConfig.CertificateAuthority.PrivateKey.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "unable to retrieve CA private key content")
return err
}
crt, key, err = etcd.GetETCDCACertificateAndKeyPair(tenantControlPlane.GetName(), ca, privateKey)
crt, key, err = crypto.GenerateCertificatePrivateKeyPair(crypto.NewCertificateTemplate(tenantControlPlane.GetName()), ca, privateKey)
if err != nil {
logger.Error(err, "unable to generate certificate and private key")
return err
}
case kamajiv1alpha1.KineMySQLDriver, kamajiv1alpha1.KinePostgreSQLDriver:
@@ -111,12 +121,16 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
// to connect to the desired schema and database.
crtBytes, err := r.DataStore.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "unable to retrieve certificate content")
return err
}
crt = bytes.NewBuffer(crtBytes)
keyBytes, err := r.DataStore.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, r.Client)
if err != nil {
logger.Error(err, "unable to retrieve private key content")
return err
}
key = bytes.NewBuffer(keyBytes)
@@ -131,7 +145,7 @@ func (r *Certificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1al
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
r.resource.SetLabels(utilities.MergeMaps(

View File

@@ -6,11 +6,13 @@ package datastore
import (
"context"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/datastore"
@@ -44,12 +46,16 @@ func (r *Setup) CleanUp(context.Context, *kamajiv1alpha1.TenantControlPlane) (bo
}
func (r *Setup) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
logger := log.FromContext(ctx, "resource", r.GetName())
secret := &corev1.Secret{}
namespacedName := types.NamespacedName{
Namespace: tenantControlPlane.GetNamespace(),
Name: tenantControlPlane.Status.Storage.Config.SecretName,
}
if err := r.Client.Get(ctx, namespacedName, secret); err != nil {
logger.Error(err, "cannot retrieve the DataStore Configuration secret")
return err
}
@@ -67,6 +73,8 @@ func (r *Setup) GetClient() client.Client {
}
func (r *Setup) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
logger := log.FromContext(ctx, "resource", r.GetName())
if tenantControlPlane.Status.Storage.Setup.Checksum != "" &&
tenantControlPlane.Status.Storage.Setup.Checksum != tenantControlPlane.Status.Storage.Config.Checksum {
if err := r.Delete(ctx, tenantControlPlane); err != nil {
@@ -82,18 +90,24 @@ func (r *Setup) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1
operationResult, err = r.createDB(ctx, tenantControlPlane)
if err != nil {
logger.Error(err, "unable to create the DataStore data")
return reconcilationResult, err
}
reconcilationResult = utils.UpdateOperationResult(reconcilationResult, operationResult)
operationResult, err = r.createUser(ctx, tenantControlPlane)
if err != nil {
logger.Error(err, "unable to create the DataStore user")
return reconcilationResult, err
}
reconcilationResult = utils.UpdateOperationResult(reconcilationResult, operationResult)
operationResult, err = r.createGrantPrivileges(ctx, tenantControlPlane)
if err != nil {
logger.Error(err, "unable to create the DataStore user privileges")
return reconcilationResult, err
}
reconcilationResult = utils.UpdateOperationResult(reconcilationResult, operationResult)
@@ -106,19 +120,23 @@ func (r *Setup) GetName() string {
}
func (r *Setup) Delete(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if err := r.Define(ctx, tenantControlPlane); err != nil {
return err
}
logger := log.FromContext(ctx, "resource", r.GetName())
if err := r.revokeGrantPrivileges(ctx, tenantControlPlane); err != nil {
logger.Error(err, "unable to revoke privileges")
return err
}
if err := r.deleteDB(ctx, tenantControlPlane); err != nil {
logger.Error(err, "unable to delete datastore data")
return err
}
if err := r.deleteUser(ctx, tenantControlPlane); err != nil {
logger.Error(err, "unable to delete user")
return err
}
@@ -137,7 +155,7 @@ func (r *Setup) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlP
func (r *Setup) createDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
exists, err := r.Connection.DBExists(ctx, r.resource.schema)
if err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to check if datastore exists")
}
if exists {
@@ -145,7 +163,7 @@ func (r *Setup) createDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPla
}
if err := r.Connection.CreateDB(ctx, r.resource.schema); err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to create the datastore")
}
return controllerutil.OperationResultCreated, nil
@@ -154,7 +172,7 @@ func (r *Setup) createDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPla
func (r *Setup) deleteDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) error {
exists, err := r.Connection.DBExists(ctx, r.resource.schema)
if err != nil {
return err
return errors.Wrap(err, "unable to check if datastore exists")
}
if !exists {
@@ -162,7 +180,7 @@ func (r *Setup) deleteDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPla
}
if err := r.Connection.DeleteDB(ctx, r.resource.schema); err != nil {
return err
return errors.Wrap(err, "unable to delete the datastore")
}
return nil
@@ -171,7 +189,7 @@ func (r *Setup) deleteDB(ctx context.Context, _ *kamajiv1alpha1.TenantControlPla
func (r *Setup) createUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
exists, err := r.Connection.UserExists(ctx, r.resource.user)
if err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to check if user exists")
}
if exists {
@@ -179,7 +197,7 @@ func (r *Setup) createUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlP
}
if err := r.Connection.CreateUser(ctx, r.resource.user, r.resource.password); err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to create the user")
}
return controllerutil.OperationResultCreated, nil
@@ -188,7 +206,7 @@ func (r *Setup) createUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlP
func (r *Setup) deleteUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) error {
exists, err := r.Connection.UserExists(ctx, r.resource.user)
if err != nil {
return err
return errors.Wrap(err, "unable to check if user exists")
}
if !exists {
@@ -196,7 +214,7 @@ func (r *Setup) deleteUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlP
}
if err := r.Connection.DeleteUser(ctx, r.resource.user); err != nil {
return err
return errors.Wrap(err, "unable to remove the user")
}
return nil
@@ -205,7 +223,7 @@ func (r *Setup) deleteUser(ctx context.Context, _ *kamajiv1alpha1.TenantControlP
func (r *Setup) createGrantPrivileges(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
exists, err := r.Connection.GrantPrivilegesExists(ctx, r.resource.user, r.resource.schema)
if err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to check if privileges exist")
}
if exists {
@@ -213,7 +231,7 @@ func (r *Setup) createGrantPrivileges(ctx context.Context, _ *kamajiv1alpha1.Ten
}
if err := r.Connection.GrantPrivileges(ctx, r.resource.user, r.resource.schema); err != nil {
return controllerutil.OperationResultNone, err
return controllerutil.OperationResultNone, errors.Wrap(err, "unable to grant privileges")
}
return controllerutil.OperationResultCreated, nil
@@ -222,7 +240,7 @@ func (r *Setup) createGrantPrivileges(ctx context.Context, _ *kamajiv1alpha1.Ten
func (r *Setup) revokeGrantPrivileges(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) error {
exists, err := r.Connection.GrantPrivilegesExists(ctx, r.resource.user, r.resource.schema)
if err != nil {
return err
return errors.Wrap(err, "unable to check if privileges exist")
}
if !exists {
@@ -230,7 +248,7 @@ func (r *Setup) revokeGrantPrivileges(ctx context.Context, _ *kamajiv1alpha1.Ten
}
if err := r.Connection.RevokePrivileges(ctx, r.resource.user, r.resource.schema); err != nil {
return err
return errors.Wrap(err, "unable to revoke privileges")
}
return nil
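// Note: a minimal illustration of the errors.Wrap pattern adopted throughout this file; the
// wrapped error gains operator-facing context while the original cause stays inspectable.
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errConn = errors.New("connection refused")

func dbExists() (bool, error) { return false, errConn } // stand-in for Connection.DBExists

func main() {
	_, err := dbExists()
	wrapped := errors.Wrap(err, "unable to check if datastore exists")
	fmt.Println(wrapped)                          // unable to check if datastore exists: connection refused
	fmt.Println(errors.Cause(wrapped) == errConn) // true: the root cause survives wrapping
}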

View File

@@ -6,6 +6,7 @@ package datastore
import (
"context"
"github.com/google/uuid"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -13,6 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -24,7 +26,7 @@ type Config struct {
}
func (r *Config) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Storage.Config.Checksum != r.resource.GetAnnotations()["checksum"] ||
return tenantControlPlane.Status.Storage.Config.Checksum != r.resource.GetAnnotations()[constants.Checksum] ||
tenantControlPlane.Status.Storage.DataStoreName != r.DataStore.GetName()
}
@@ -67,7 +69,7 @@ func (r *Config) UpdateTenantControlPlaneStatus(_ context.Context, tenantControl
tenantControlPlane.Status.Storage.Driver = string(r.DataStore.Spec.Driver)
tenantControlPlane.Status.Storage.DataStoreName = r.DataStore.GetName()
tenantControlPlane.Status.Storage.Config.SecretName = r.resource.GetName()
tenantControlPlane.Status.Storage.Config.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Storage.Config.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
@@ -76,12 +78,12 @@ func (r *Config) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.Te
return func() error {
var password []byte
savedHash, ok := r.resource.GetAnnotations()["checksum"]
savedHash, ok := r.resource.GetAnnotations()[constants.Checksum]
switch {
case ok && savedHash == utilities.CalculateConfigMapChecksum(r.resource.StringData):
case ok && savedHash == utilities.CalculateMapChecksum(r.resource.Data):
password = r.resource.Data["DB_PASSWORD"]
default:
password = []byte(utilities.GenerateUUIDString())
password = []byte(uuid.New().String())
}
r.resource.Data = map[string][]byte{
@@ -96,7 +98,7 @@ func (r *Config) mutate(_ context.Context, tenantControlPlane *kamajiv1alpha1.Te
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
r.resource.SetLabels(utilities.MergeMaps(
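// Note: a tiny sketch of the password-generation swap above; uuid.New never returns an error
// and its canonical string form carries 122 bits of randomness.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	password := []byte(uuid.New().String())
	fmt.Println(len(password)) // 36: the canonical UUID text length
}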

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
@@ -15,8 +14,11 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -24,12 +26,11 @@ import (
type FrontProxyClientCertificate struct {
resource *corev1.Secret
Client client.Client
Log logr.Logger
TmpDirectory string
}
func (r *FrontProxyClientCertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *FrontProxyClientCertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -74,20 +75,22 @@ func (r *FrontProxyClientCertificate) GetName() string {
func (r *FrontProxyClientCertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyClient.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyClient.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
if checksum := tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.FrontProxyClient.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.FrontProxyClientCertName],
r.resource.Data[kubeadmconstants.FrontProxyClientKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyClientCertAndKeyBaseName, err.Error()))
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyClientCertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -96,12 +99,16 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
return err
}
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName}
secretCA := &corev1.Secret{}
if err = r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve CA secret")
return err
}
@@ -112,6 +119,8 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
}
certificateKeyPair, err := kubeadm.GenerateCertificatePrivateKeyPair(kubeadmconstants.FrontProxyClientCertAndKeyBaseName, config, ca)
if err != nil {
logger.Error(err, "cannot generate certificate and private key")
return err
}
@@ -132,7 +141,7 @@ func (r *FrontProxyClientCertificate) mutate(ctx context.Context, tenantControlP
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())

View File

@@ -7,15 +7,17 @@ import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -23,12 +25,11 @@ import (
type FrontProxyCACertificate struct {
resource *corev1.Secret
Client client.Client
Log logr.Logger
TmpDirectory string
}
func (r *FrontProxyCACertificate) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *FrontProxyCACertificate) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -73,20 +74,22 @@ func (r *FrontProxyCACertificate) GetName() string {
func (r *FrontProxyCACertificate) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
tenantControlPlane.Status.Certificates.FrontProxyCA.LastUpdate = metav1.Now()
tenantControlPlane.Status.Certificates.FrontProxyCA.SecretName = r.resource.GetName()
tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum = r.resource.GetAnnotations()["checksum"]
tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
if checksum := tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()["checksum"] {
isValid, err := kubeadm.IsCertificatePrivateKeyPairValid(
logger := log.FromContext(ctx, "resource", r.GetName())
if checksum := tenantControlPlane.Status.Certificates.FrontProxyCA.Checksum; len(checksum) > 0 && checksum == r.resource.GetAnnotations()[constants.Checksum] {
isValid, err := crypto.CheckCertificateAndPrivateKeyPairValidity(
r.resource.Data[kubeadmconstants.FrontProxyCACertName],
r.resource.Data[kubeadmconstants.FrontProxyCAKeyName],
)
if err != nil {
r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyCACertAndKeyBaseName, err.Error()))
logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", kubeadmconstants.FrontProxyCACertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -95,11 +98,15 @@ func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane
config, err := getStoredKubeadmConfiguration(ctx, r, tenantControlPlane)
if err != nil {
logger.Error(err, "cannot retrieve kubeadm configuration")
return err
}
ca, err := kubeadm.GenerateCACertificatePrivateKeyPair(kubeadmconstants.FrontProxyCACertAndKeyBaseName, config)
if err != nil {
logger.Error(err, "cannot generate certificate and private key")
return err
}
@@ -120,7 +127,7 @@ func (r *FrontProxyCACertificate) mutate(ctx context.Context, tenantControlPlane
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())

View File

@@ -5,12 +5,18 @@ package resources
import (
"context"
"crypto/md5"
"fmt"
"sort"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
builder "github.com/clastix/kamaji/internal/builders/controlplane"
@@ -56,9 +62,13 @@ func (r *KubernetesDeploymentResource) Define(_ context.Context, tenantControlPl
func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
address, _, err := tenantControlPlane.AssignedControlPlaneAddress()
if err != nil {
return errors.Wrap(err, "cannot create TenantControlPlane Deployment")
logger.Error(err, "cannot retrieve Tenant Control Plane address")
return err
}
d := builder.Deployment{
@@ -69,8 +79,12 @@ func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControl
d.SetLabels(r.resource, utilities.MergeMaps(utilities.CommonLabels(tenantControlPlane.GetName()), tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Labels))
d.SetAnnotations(r.resource, utilities.MergeMaps(r.resource.Annotations, tenantControlPlane.Spec.ControlPlane.Deployment.AdditionalMetadata.Annotations))
d.SetTemplateLabels(&r.resource.Spec.Template, r.deploymentTemplateLabels(ctx, tenantControlPlane))
d.SetNodeSelector(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetToleration(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetAffinity(&r.resource.Spec.Template.Spec, tenantControlPlane)
d.SetStrategy(&r.resource.Spec)
d.SetSelector(&r.resource.Spec, tenantControlPlane)
d.SetTopologySpreadConstraints(&r.resource.Spec, tenantControlPlane.Spec.ControlPlane.Deployment.TopologySpreadConstraints)
d.SetReplicas(&r.resource.Spec, tenantControlPlane)
d.ResetKubeAPIServerFlags(r.resource, tenantControlPlane)
d.SetContainers(&r.resource.Spec.Template.Spec, tenantControlPlane, address)
@@ -103,6 +117,7 @@ func (r *KubernetesDeploymentResource) UpdateTenantControlPlaneStatus(_ context.
tenantControlPlane.Status.Kubernetes.Deployment = kamajiv1alpha1.KubernetesDeploymentStatus{
DeploymentStatus: r.resource.Status,
Selector: metav1.FormatLabelSelector(r.resource.Spec.Selector),
Name: r.resource.GetName(),
Namespace: r.resource.GetNamespace(),
LastUpdate: metav1.Now(),
@@ -113,7 +128,7 @@ func (r *KubernetesDeploymentResource) UpdateTenantControlPlaneStatus(_ context.
func (r *KubernetesDeploymentResource) deploymentTemplateLabels(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (labels map[string]string) {
hash := func(ctx context.Context, namespace, secretName string) string {
h, _ := utilities.SecretHashValue(ctx, r.Client, namespace, secretName)
h, _ := r.SecretHashValue(ctx, r.Client, namespace, secretName)
return h
}
@@ -158,3 +173,33 @@ func (r *KubernetesDeploymentResource) isProvisioning(tenantControlPlane *kamaji
func (r *KubernetesDeploymentResource) isNotReady() bool {
return r.resource.Status.ReadyReplicas == 0
}
// SecretHashValue function returns the md5 value for the secret of the given name and namespace.
func (r *KubernetesDeploymentResource) SecretHashValue(ctx context.Context, client client.Client, namespace, name string) (string, error) {
secret := &corev1.Secret{}
if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, secret); err != nil {
return "", errors.Wrap(err, "cannot retrieve *corev1.Secret for resource version retrieval")
}
return r.HashValue(*secret), nil
}
// HashValue function returns the md5 value for the given secret.
func (r *KubernetesDeploymentResource) HashValue(secret corev1.Secret) string {
// Go accesses map keys in a random order, which means we have to sort them.
keys := make([]string, 0, len(secret.Data))
for k := range secret.Data {
keys = append(keys, k)
}
sort.Strings(keys)
// Generate the MD5 digest of the Secret values, sorted by key.
h := md5.New()
for _, key := range keys {
h.Write(secret.Data[key])
}
return fmt.Sprintf("%x", h.Sum(nil))
}
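// Design note: MD5 serves here purely as a cheap change detector, not as a cryptographic
// integrity guarantee. Because the digest is embedded in the pod-template labels (see
// deploymentTemplateLabels above), any rotation of a referenced Secret changes the Deployment
// template and therefore triggers a rollout of the tenant control plane pods.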

View File

@@ -13,6 +13,7 @@ import (
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
@@ -34,8 +35,12 @@ func (r *KubernetesIngressResource) ShouldCleanup(tenantControlPlane *kamajiv1al
}
func (r *KubernetesIngressResource) CleanUp(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (bool, error) {
logger := log.FromContext(ctx, "resource", r.GetName())
if err := r.Client.Delete(ctx, r.resource); err != nil {
if !k8serrors.IsNotFound(err) {
logger.Error(err, "cannot cleanup resource")
return false, err
}

View File

@@ -12,6 +12,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/utilities"
@@ -39,6 +40,8 @@ func (r *KubernetesServiceResource) CleanUp(context.Context, *kamajiv1alpha1.Ten
}
func (r *KubernetesServiceResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
logger := log.FromContext(ctx, "resource", r.GetName())
tenantControlPlane.Status.Kubernetes.Service.ServiceStatus = r.resource.Status
tenantControlPlane.Status.Kubernetes.Service.Name = r.resource.GetName()
tenantControlPlane.Status.Kubernetes.Service.Namespace = r.resource.GetNamespace()
@@ -46,6 +49,8 @@ func (r *KubernetesServiceResource) UpdateTenantControlPlaneStatus(ctx context.C
address, err := tenantControlPlane.DeclaredControlPlaneAddress(ctx, r.Client)
if err != nil {
logger.Error(err, "cannot retrieve Tenant Control Plane address")
return err
}
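// Note: the contextual logger used above replaces the per-struct Log fields removed elsewhere
// in this changeset; a minimal sketch of the controller-runtime pattern, with an illustrative
// resource name:
package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	log.SetLogger(zap.New()) // install the root logger once, typically in main()
	ctx := context.Background()
	logger := log.FromContext(ctx, "resource", "kubernetes-service")
	logger.Info("reconciling") // key/value pairs given to FromContext are attached automatically
}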

View File

@@ -15,8 +15,10 @@ import (
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
@@ -27,12 +29,11 @@ const (
type Agent struct {
resource *appsv1.DaemonSet
Client client.Client
Name string
tenantClient client.Client
}
func (r *Agent) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.Agent.Checksum != r.resource.GetAnnotations()["checksum"]
return tenantControlPlane.Status.Addons.Konnectivity.Agent.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *Agent) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
@@ -51,7 +52,9 @@ func (r *Agent) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.
return true, nil
}
func (r *Agent) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
func (r *Agent) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (err error) {
logger := log.FromContext(ctx, "resource", r.GetName())
r.resource = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: AgentName,
@@ -59,22 +62,21 @@ func (r *Agent) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.T
},
}
client, err := utilities.GetTenantClient(ctx, r.Client, tenantControlPlane)
if err != nil {
if r.tenantClient, err = utilities.GetTenantClient(ctx, r.Client, tenantControlPlane); err != nil {
logger.Error(err, "unable to retrieve the Tenant Control Plane client")
return err
}
r.tenantClient = client
return nil
}
func (r *Agent) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate(tenantControlPlane))
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate(ctx, tenantControlPlane))
}
func (r *Agent) GetName() string {
return r.Name
return "konnectivity-agent"
}
func (r *Agent) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
@@ -82,24 +84,26 @@ func (r *Agent) UpdateTenantControlPlaneStatus(ctx context.Context, tenantContro
tenantControlPlane.Status.Addons.Konnectivity.Agent = kamajiv1alpha1.ExternalKubernetesObjectStatus{
Name: r.resource.GetName(),
Namespace: r.resource.GetNamespace(),
Checksum: r.resource.GetAnnotations()["checksum"],
Checksum: r.resource.GetAnnotations()[constants.Checksum],
LastUpdate: metav1.Now(),
}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
return nil
}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = false
tenantControlPlane.Status.Addons.Konnectivity.Agent = kamajiv1alpha1.ExternalKubernetesObjectStatus{}
return nil
}
func (r *Agent) mutate(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
func (r *Agent) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
logger := log.FromContext(ctx, "resource", r.GetName())
address, _, err := tenantControlPlane.AssignedControlPlaneAddress()
if err != nil {
logger.Error(err, "unable to retrieve the Tenant Control Plane address")
return err
}
@@ -198,12 +202,12 @@ func (r *Agent) mutate(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) co
c.SetAnnotations(nil)
c.SetResourceVersion("")
yaml, _ := utilities.EncondeToYaml(c)
yaml, _ := utilities.EncodeToYaml(c)
annotations := r.resource.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.MD5Checksum(yaml)
annotations[constants.Checksum] = utilities.MD5Checksum(yaml)
r.resource.SetAnnotations(annotations)
return nil
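// Note: utilities.MD5Checksum is not shown in this diff; given its call site above ([]byte of
// the encoded DaemonSet in, hex string out), a plausible one-line shape is:
package utilities

import (
	"crypto/md5"
	"fmt"
)

func MD5Checksum(data []byte) string {
	return fmt.Sprintf("%x", md5.Sum(data))
}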

View File

@@ -4,16 +4,9 @@
package konnectivity
import (
"bytes"
"context"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"math/big"
"math/rand"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -22,8 +15,10 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/crypto"
"github.com/clastix/kamaji/internal/kubeadm"
"github.com/clastix/kamaji/internal/utilities"
@@ -32,21 +27,23 @@ import (
type CertificateResource struct {
resource *corev1.Secret
Client client.Client
- Log logr.Logger
- Name string
}
func (r *CertificateResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
- return tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum != r.resource.GetAnnotations()["checksum"]
+ return tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum != r.resource.GetAnnotations()[constants.Checksum]
}
func (r *CertificateResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Spec.Addons.Konnectivity == nil
}
- func (r *CertificateResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
+ func (r *CertificateResource) CleanUp(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (bool, error) {
+ logger := log.FromContext(ctx, "resource", r.GetName())
if err := r.Client.Delete(ctx, r.resource); err != nil {
if !k8serrors.IsNotFound(err) {
logger.Error(err, "cannot delete the required resource")
return false, err
}
@@ -56,10 +53,10 @@ func (r *CertificateResource) CleanUp(ctx context.Context, tenantControlPlane *k
return true, nil
}
- func (r *CertificateResource) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
+ func (r *CertificateResource) Define(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
r.resource = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
- Name: r.getPrefixedName(tenantControlPlane),
+ Name: utilities.AddTenantPrefix(r.GetName(), tenantControlPlane),
Namespace: tenantControlPlane.GetNamespace(),
},
}
@@ -67,43 +64,36 @@ func (r *CertificateResource) Define(ctx context.Context, tenantControlPlane *ka
return nil
}
- func (r *CertificateResource) getPrefixedName(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) string {
- return utilities.AddTenantPrefix(r.Name, tenantControlPlane)
- }
func (r *CertificateResource) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
return controllerutil.CreateOrUpdate(ctx, r.Client, r.resource, r.mutate(ctx, tenantControlPlane))
}
func (r *CertificateResource) GetName() string {
- return r.Name
+ return "konnectivity-certificate"
}
func (r *CertificateResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Certificate.LastUpdate = metav1.Now()
tenantControlPlane.Status.Addons.Konnectivity.Certificate.SecretName = r.resource.GetName()
- tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum = r.resource.GetAnnotations()["checksum"]
- tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
+ tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum = r.resource.GetAnnotations()[constants.Checksum]
return nil
}
tenantControlPlane.Status.Addons.Konnectivity.Certificate = kamajiv1alpha1.CertificatePrivateKeyPairStatus{}
- tenantControlPlane.Status.Addons.Konnectivity.Enabled = false
return nil
}
func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) controllerutil.MutateFn {
return func() error {
- if checksum := tenantControlPlane.Status.Certificates.CA.Checksum; len(checksum) > 0 && checksum == utilities.CalculateConfigMapChecksum(r.resource.StringData) {
- isValid, err := isCertificateAndKeyPairValid(
- r.resource.Data[corev1.TLSCertKey],
- r.resource.Data[corev1.TLSPrivateKeyKey],
- )
+ logger := log.FromContext(ctx, "resource", r.GetName())
+ if checksum := tenantControlPlane.Status.Addons.Konnectivity.Certificate.Checksum; len(checksum) > 0 && checksum == utilities.CalculateMapChecksum(r.resource.Data) {
+ isValid, err := crypto.IsValidCertificateKeyPairBytes(r.resource.Data[corev1.TLSCertKey], r.resource.Data[corev1.TLSPrivateKeyKey])
if err != nil {
- r.Log.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", konnectivityCertAndKeyBaseName, err.Error()))
+ logger.Info(fmt.Sprintf("%s certificate-private_key pair is not valid: %s", konnectivityCertAndKeyBaseName, err.Error()))
}
if isValid {
return nil
@@ -113,6 +103,8 @@ func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *ka
namespacedName := k8stypes.NamespacedName{Namespace: tenantControlPlane.GetNamespace(), Name: tenantControlPlane.Status.Certificates.CA.SecretName}
secretCA := &corev1.Secret{}
if err := r.Client.Get(ctx, namespacedName, secretCA); err != nil {
logger.Error(err, "cannot retrieve the CA secret")
return err
}
@@ -121,8 +113,11 @@ func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *ka
Certificate: secretCA.Data[kubeadmconstants.CACertName],
PrivateKey: secretCA.Data[kubeadmconstants.CAKeyName],
}
- cert, privKey, err := getCertificateAndKeyPair(ca.Certificate, ca.PrivateKey)
+ cert, privKey, err := crypto.GenerateCertificatePrivateKeyPair(crypto.NewCertificateTemplate(CertCommonName), ca.Certificate, ca.PrivateKey)
if err != nil {
logger.Error(err, "unable to generate certificate and private key")
return err
}
@@ -144,41 +139,9 @@ func (r *CertificateResource) mutate(ctx context.Context, tenantControlPlane *ka
if annotations == nil {
annotations = map[string]string{}
}
annotations["checksum"] = utilities.CalculateConfigMapChecksum(r.resource.StringData)
annotations[constants.Checksum] = utilities.CalculateMapChecksum(r.resource.Data)
r.resource.SetAnnotations(annotations)
return ctrl.SetControllerReference(tenantControlPlane, r.resource, r.Client.Scheme())
}
}
- func getCertificateAndKeyPair(caCert []byte, caPrivKey []byte) (*bytes.Buffer, *bytes.Buffer, error) {
- template := getCertTemplate()
- return crypto.GetCertificateAndKeyPair(template, caCert, caPrivKey)
- }
- func isCertificateAndKeyPairValid(cert []byte, privKey []byte) (bool, error) {
- return crypto.IsValidCertificateKeyPairBytes(cert, privKey)
- }
- func getCertTemplate() *x509.Certificate {
- serialNumber := big.NewInt(rand.Int63())
- return &x509.Certificate{
- PublicKeyAlgorithm: x509.RSA,
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- CommonName: CertCommonName,
- Organization: []string{certOrganization},
- },
- NotBefore: time.Now(),
- NotAfter: time.Now().AddDate(certExpirationDelayYears, 0, 0),
- SubjectKeyId: []byte{1, 2, 3, 4, 6},
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageClientAuth,
- x509.ExtKeyUsageServerAuth,
- x509.ExtKeyUsageCodeSigning,
- },
- KeyUsage: x509.KeyUsageDigitalSignature,
- }
- }
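
Note: the largest cut in this file removes locally rolled x509 plumbing in favor of shared helpers in internal/crypto (NewCertificateTemplate, GenerateCertificatePrivateKeyPair, IsValidCertificateKeyPairBytes). As a standalone illustration of what that consolidated path amounts to, the sketch below builds a template shaped like the deleted getCertTemplate and issues a CA-signed leaf certificate using only the Go standard library; the common names and the 1-year validity are assumptions, and error handling is elided for brevity.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

// newTemplate mirrors the shape of the deleted getCertTemplate helper;
// the field values follow the removed code, except the hypothetical
// common name and validity period passed in by the caller.
func newTemplate(commonName string, years int) *x509.Certificate {
	return &x509.Certificate{
		PublicKeyAlgorithm: x509.RSA,
		SerialNumber:       big.NewInt(time.Now().UnixNano()),
		Subject:            pkix.Name{CommonName: commonName},
		NotBefore:          time.Now(),
		NotAfter:           time.Now().AddDate(years, 0, 0),
		ExtKeyUsage: []x509.ExtKeyUsage{
			x509.ExtKeyUsageClientAuth,
			x509.ExtKeyUsageServerAuth,
		},
		KeyUsage: x509.KeyUsageDigitalSignature,
	}
}

func main() {
	// Self-signed CA standing in for the tenant CA secret fetched from
	// tenantControlPlane.Status.Certificates.CA.SecretName.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTpl := newTemplate("hypothetical-tenant-ca", 10)
	caTpl.IsCA = true
	caTpl.BasicConstraintsValid = true
	caTpl.KeyUsage |= x509.KeyUsageCertSign
	caDER, _ := x509.CreateCertificate(rand.Reader, caTpl, caTpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf certificate signed by the CA, comparable to what a helper
	// like crypto.GenerateCertificatePrivateKeyPair produces for the secret.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafDER, _ := x509.CreateCertificate(rand.Reader, newTemplate("konnectivity-client", 1), caCert, &leafKey.PublicKey, caKey)

	fmt.Printf("issued %d PEM bytes\n", len(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leafDER})))
}
```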

View File

@@ -11,30 +11,35 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/constants"
"github.com/clastix/kamaji/internal/utilities"
)
type ClusterRoleBindingResource struct {
resource *rbacv1.ClusterRoleBinding
Client client.Client
- Name string
tenantClient client.Client
}
- func (r *ClusterRoleBindingResource) ShouldStatusBeUpdated(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
+ func (r *ClusterRoleBindingResource) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.Name != r.resource.GetName() ||
- tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.Checksum != r.resource.ObjectMeta.GetAnnotations()["checksum"]
+ tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding.Checksum != r.resource.ObjectMeta.GetAnnotations()[constants.Checksum]
}
func (r *ClusterRoleBindingResource) ShouldCleanup(tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
return tenantControlPlane.Spec.Addons.Konnectivity == nil
}
- func (r *ClusterRoleBindingResource) CleanUp(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (bool, error) {
+ func (r *ClusterRoleBindingResource) CleanUp(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (bool, error) {
+ logger := log.FromContext(ctx, "resource", r.GetName())
if err := r.tenantClient.Delete(ctx, r.resource); err != nil {
if !k8serrors.IsNotFound(err) {
logger.Error(err, "cannot delete the requeste resource")
return false, err
}
@@ -44,44 +49,44 @@ func (r *ClusterRoleBindingResource) CleanUp(ctx context.Context, tenantControlP
return true, nil
}
- func (r *ClusterRoleBindingResource) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
+ func (r *ClusterRoleBindingResource) Define(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (err error) {
+ logger := log.FromContext(ctx, "resource", r.GetName())
r.resource = &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: CertCommonName,
},
}
- client, err := utilities.GetTenantClient(ctx, r.Client, tenantControlPlane)
- if err != nil {
+ if r.tenantClient, err = utilities.GetTenantClient(ctx, r.Client, tenantControlPlane); err != nil {
+ logger.Error(err, "cannot get Tenant Control Plane client")
return err
}
- r.tenantClient = client
return nil
}
- func (r *ClusterRoleBindingResource) CreateOrUpdate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
+ func (r *ClusterRoleBindingResource) CreateOrUpdate(ctx context.Context, _ *kamajiv1alpha1.TenantControlPlane) (controllerutil.OperationResult, error) {
return controllerutil.CreateOrUpdate(ctx, r.tenantClient, r.resource, r.mutate())
}
func (r *ClusterRoleBindingResource) GetName() string {
- return r.Name
+ return "konnectivity-clusterrolebinding"
}
- func (r *ClusterRoleBindingResource) UpdateTenantControlPlaneStatus(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
+ func (r *ClusterRoleBindingResource) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
if tenantControlPlane.Spec.Addons.Konnectivity != nil {
tenantControlPlane.Status.Addons.Konnectivity.Enabled = true
tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding = kamajiv1alpha1.ExternalKubernetesObjectStatus{
Name: r.resource.GetName(),
- Checksum: r.resource.GetAnnotations()["checksum"],
+ Checksum: r.resource.GetAnnotations()[constants.Checksum],
}
return nil
}
tenantControlPlane.Status.Addons.Konnectivity.ClusterRoleBinding = kamajiv1alpha1.ExternalKubernetesObjectStatus{}
tenantControlPlane.Status.Addons.Konnectivity.Enabled = false
return nil
}
@@ -115,8 +120,8 @@ func (r *ClusterRoleBindingResource) mutate() controllerutil.MutateFn {
annotations = map[string]string{}
}
- yaml, _ := utilities.EncondeToYaml(r.resource)
- annotations["checksum"] = utilities.MD5Checksum(yaml)
+ yaml, _ := utilities.EncodeToYaml(r.resource)
+ annotations[constants.Checksum] = utilities.MD5Checksum(yaml)
return nil
}
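
Note: across all three files the refactor applies the same two idioms: the per-resource Log logr.Logger field gives way to log.FromContext(ctx, ...), and Define adopts a named err return so the struct-field assignment and the error check collapse into a single if statement. The sketch below isolates that named-return pattern with made-up types; nothing in it is Kamaji's actual API.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// fakeClient stands in for controller-runtime's client.Client.
type fakeClient struct{}

// getTenantClient is a hypothetical stand-in for utilities.GetTenantClient:
// it either yields a client for the tenant API server or fails.
func getTenantClient(_ context.Context, reachable bool) (*fakeClient, error) {
	if !reachable {
		return nil, errors.New("tenant API server unreachable")
	}
	return &fakeClient{}, nil
}

type resource struct {
	tenantClient *fakeClient
}

// Define shows the shape the diff converges on: the named return value
// err lets the field and the error be assigned in one statement,
// removing the temporary client variable while keeping the error
// logging right next to the call that produced it.
func (r *resource) Define(ctx context.Context, reachable bool) (err error) {
	if r.tenantClient, err = getTenantClient(ctx, reachable); err != nil {
		fmt.Println("cannot get Tenant Control Plane client:", err)
		return err
	}
	return nil
}

func main() {
	r := &resource{}
	fmt.Println("error path:", r.Define(context.Background(), false))
	fmt.Println("happy path:", r.Define(context.Background(), true))
}
```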

Some files were not shown because too many files have changed in this diff.