Compare commits
1 commit
v0.1.2 ... issues/451

| Author | SHA1 | Date |
|---|---|---|
|  | 86f28a0202 |  |
.github/workflows/ci.yml (vendored, 4 changed lines)

@@ -24,7 +24,7 @@ jobs:
       - name: Run golangci-lint
         uses: golangci/golangci-lint-action@v2.3.0
         with:
-          version: v1.45.2
+          version: latest
           only-new-issues: false
           args: --timeout 2m --config .golangci.yml
   diff:
@@ -36,7 +36,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-go@v2
         with:
-          go-version: '1.18'
+          go-version: '^1.16'
       - run: make installer
       - name: Checking if YAML installer file is not aligned
         run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
.github/workflows/docker-ci.yml (vendored, 8 changed lines)

@@ -36,7 +36,6 @@ jobs:
         with:
           images: |
             quay.io/${{ github.repository }}
-            docker.io/${{ github.repository }}
           tags: |
             type=semver,pattern={{raw}}
           flavor: |
@@ -69,13 +68,6 @@ jobs:
           username: ${{ github.repository_owner }}+github
           password: ${{ secrets.BOT_QUAY_IO }}
-
-      - name: Login to docker.io Container Registry
-        uses: docker/login-action@v1
-        with:
-          registry: docker.io
-          username: ${{ secrets.USER_DOCKER_IO }}
-          password: ${{ secrets.BOT_DOCKER_IO }}

       - name: Build and push
         id: build-release
         uses: docker/build-push-action@v2
.github/workflows/e2e.yml (vendored, 14 changed lines)

@@ -7,7 +7,6 @@ on:
       - '.github/workflows/e2e.yml'
       - 'api/**'
       - 'controllers/**'
-      - 'pkg/**'
       - 'e2e/*'
       - 'Dockerfile'
       - 'go.*'
@@ -19,7 +18,6 @@ on:
       - '.github/workflows/e2e.yml'
       - 'api/**'
       - 'controllers/**'
-      - 'pkg/**'
       - 'e2e/*'
       - 'Dockerfile'
       - 'go.*'
@@ -30,9 +28,8 @@ jobs:
   kind:
     name: Kubernetes
     strategy:
-      fail-fast: false
       matrix:
-        k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.6', 'v1.24.1']
+        k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.0']
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v2
@@ -40,16 +37,21 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-go@v2
         with:
-          go-version: '1.18'
+          go-version: '^1.16'
       - run: make manifests
       - name: Checking if manifests are disaligned
         run: test -z "$(git diff 2> /dev/null)"
       - name: Checking if manifests generated untracked files
         run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
+      - name: Installing Ginkgo
+        run: go get github.com/onsi/ginkgo/ginkgo
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.16'
       - uses: engineerd/setup-kind@v0.5.0
         with:
           skipClusterCreation: true
-          version: v0.14.0
+          version: v0.11.1
       - uses: azure/setup-helm@v1
         with:
           version: 3.3.4
.github/workflows/helm.yml (vendored, 3 changed lines)

@@ -6,6 +6,9 @@ on:
     tags: [ "helm-v*" ]
   pull_request:
     branches: [ "*" ]
+  create:
+    branches: [ "*" ]
+    tags: [ "helm-v*" ]

 jobs:
   lint:
.gitignore (vendored, 1 changed line)

@@ -28,5 +28,4 @@ bin
 **/*.crt
 **/*.key
 .DS_Store
-*.tgz
.golangci.yml

@@ -1,39 +1,51 @@
 linters-settings:
   govet:
     check-shadowing: true
   golint:
     min-confidence: 0
   maligned:
     suggest-new: true
   goimports:
     local-prefixes: github.com/clastix/capsule
   dupl:
     threshold: 100
   goconst:
     min-len: 2
     min-occurrences: 2
-  cyclop:
-    max-complexity: 27
-  gocognit:
-    min-complexity: 50
-  gci:
-    sections:
-      - standard
-      - default
-      - prefix(github.com/clastix/capsule)
 linters:
-  enable-all: true
-  disable:
-    - funlen
-    - gochecknoinits
-    - lll
-    - exhaustivestruct
-    - maligned
-    - interfacer
-    - scopelint
+  disable-all: true
+  enable:
+    - bodyclose
+    - deadcode
+    - depguard
+    - dogsled
+    - dupl
+    - errcheck
+    - goconst
+    - gocritic
+    - gofmt
+    - goimports
+    - golint
-    - gochecknoglobals
-    - goerr113
-    - gomnd
-    - paralleltest
-    - ireturn
-    - testpackage
-    - varnamelen
-    - wrapcheck
+    - goprintffuncname
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - interfacer
+    - misspell
+    - nolintlint
+    - rowserrcheck
+    - scopelint
+    - staticcheck
+    - structcheck
+    - stylecheck
+    - typecheck
+    - unconvert
+    - unparam
+    - unused
+    - varcheck
+    - whitespace
+    - maligned

 issues:
   exclude:
Dockerfile

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.18 as builder
+FROM golang:1.16 as builder

 ARG TARGETARCH
 ARG GIT_HEAD_COMMIT
Makefile (45 changed lines)

@@ -2,7 +2,7 @@
 VERSION ?= $$(git describe --abbrev=0 --tags --match "v*")

 # Default bundle image tag
-BUNDLE_IMG ?= clastix/capsule:$(VERSION)-bundle
+BUNDLE_IMG ?= quay.io/clastix/capsule:$(VERSION)-bundle
 # Options for 'bundle-build'
 ifneq ($(origin CHANNELS), undefined)
 BUNDLE_CHANNELS := --channels=$(CHANNELS)
@@ -13,7 +13,7 @@ endif
 BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)

 # Image URL to use all building/pushing image targets
-IMG ?= clastix/capsule:$(VERSION)
+IMG ?= quay.io/clastix/capsule:$(VERSION)
 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
 CRD_OPTIONS ?= "crd:preserveUnknownFields=false"

@@ -40,8 +40,8 @@ test: generate manifests
 	go test ./... -coverprofile cover.out

 # Build manager binary
-manager: generate golint
-	go build -o bin/manager
+manager: generate fmt vet
+	go build -o bin/manager main.go

 # Run against the configured Kubernetes cluster in ~/.kube/config
 run: generate manifests
@@ -126,8 +126,7 @@ dev-setup:
 		{'op': 'replace', 'path': '/webhooks/4/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/pods\",'caBundle':\"$${CA_BUNDLE}\"}},\
 		{'op': 'replace', 'path': '/webhooks/5/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/persistentvolumeclaims\",'caBundle':\"$${CA_BUNDLE}\"}},\
 		{'op': 'replace', 'path': '/webhooks/6/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/services\",'caBundle':\"$${CA_BUNDLE}\"}},\
-		{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/tenants\",'caBundle':\"$${CA_BUNDLE}\"}},\
-		{'op': 'replace', 'path': '/webhooks/8/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/nodes\",'caBundle':\"$${CA_BUNDLE}\"}}\
+		{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/tenants\",'caBundle':\"$${CA_BUNDLE}\"}}\
 	]";

 # Build the docker image
@@ -145,33 +144,23 @@ docker-push:

 CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0)
-
-GINKGO = $(shell pwd)/bin/ginkgo
-ginkgo: ## Download ginkgo locally if necessary.
-	$(call go-install-tool,$(KUSTOMIZE),github.com/onsi/ginkgo/ginkgo@v1.16.5)
+	$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0)

 KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
-	$(call install-kustomize,$(KUSTOMIZE),3.8.7)
+	$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)

-define install-kustomize
-	@[ -f $(1) ] || { \
-	set -e ;\
-	echo "Installing v$(2)" ;\
-	cd bin ;\
-	wget "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" ;\
-	bash ./install_kustomize.sh $(2) ;\
-	}
-endef
-
-# go-install-tool will 'go install' any package $2 and install it to $1.
+# go-get-tool will 'go get' any package $2 and install it to $1.
 PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
-define go-install-tool
+define go-get-tool
 @[ -f $(1) ] || { \
 set -e ;\
-echo "Installing $(2)" ;\
-GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
+TMP_DIR=$$(mktemp -d) ;\
+cd $$TMP_DIR ;\
+go mod init tmp ;\
+echo "Downloading $(2)" ;\
+GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
+rm -rf $$TMP_DIR ;\
 }
 endef

@@ -197,7 +186,7 @@ golint:

 # Running e2e tests in a KinD instance
 .PHONY: e2e
-e2e/%: ginkgo
+e2e/%:
 	kind create cluster --name capsule --image=kindest/node:$*
 	make docker-build
 	kind load docker-image --nodes capsule-control-plane --name capsule $(IMG)
@@ -213,5 +202,5 @@ e2e/%: ginkgo
 		--set 'manager.readinessProbe.failureThreshold=10' \
 		capsule \
 		./charts/capsule
-	$(GINKGO) -v -tags e2e ./e2e
+	ginkgo -v -tags e2e ./e2e
 	kind delete cluster --name capsule
README.md (162 changed lines)

@@ -5,9 +5,6 @@
   <a href="https://github.com/clastix/capsule/releases">
     <img src="https://img.shields.io/github/v/release/clastix/capsule"/>
   </a>
-  <a href="https://charmhub.io/capsule-k8s">
-    <img src="https://charmhub.io/capsule-k8s/badge.svg"/>
-  </a>
 </p>

 <p align="center">
@@ -16,80 +13,161 @@

 ---

-**Join the community** on the [#capsule](https://kubernetes.slack.com/archives/C03GETTJQRL) channel in the [Kubernetes Slack](https://slack.k8s.io/).
-
 # Kubernetes multi-tenancy made easy

-**Capsule** implements a multi-tenant and policy-based environment in your Kubernetes cluster. It is designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.
+**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.

 # What's the problem with the current status?

-Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well known phenomena of the _clusters sprawl_.
+Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well know phenomena of the _clusters sprawl_.

 # Entering Capsule

-Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources while the Capsule Policy Engine keeps the different tenants isolated from each other.
+Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.

-The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator. Take a look at following diagram:
+On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.

 <p align="center" style="padding: 60px 20px">
   <img src="assets/capsule-operator.svg" />
 </p>

 # Features

 ## Self-Service

-Leave developers the freedom to self-provision their cluster resources according to the assigned boundaries.
+Leave to developers the freedom to self-provision their cluster resources according to the assigned boundaries.

 ## Preventing Clusters Sprawl

 Share a single cluster with multiple teams, groups of users, or departments by saving operational and management efforts.

 ## Governance

-Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet policy requirements.
+Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet legal requirements.

 ## Resources Control

 Take control of the resources consumed by users while preventing them to overtake.

 ## Native Experience

 Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customized binaries.

 ## GitOps ready

 Capsule is completely declarative and GitOps ready.

 ## Bring your own device (BYOD)

 Assign to tenants a dedicated set of compute, storage, and network resources and avoid the noisy neighbors' effect.

 # Common use cases for Capsule

 Please, refer to the corresponding [section](./docs/operator/use-cases/overview.md) in the project documentation for a detailed list of common use cases that Capsule can address.

+# Installation
+
+Make sure you have access to a Kubernetes cluster as administrator.
+
+There are two ways to install Capsule:
+
+* Use the Helm Chart available [here](./charts/capsule/README.md)
+* Use the [single YAML file installer](./config/install.yaml)
+
+## Install with the single YAML file installer
+
+Ensure you have `kubectl` installed in your `PATH`.
+
+Clone this repository and move to the repo folder:
+
+```
+$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
+```
+
+It will install the Capsule controller in a dedicated namespace `capsule-system`.
+
+## How to create Tenants
+
+Use the scaffold [Tenant](config/samples/capsule_v1beta1_tenant.yaml) and simply apply as cluster admin.
+
+```
+$ kubectl apply -f config/samples/capsule_v1beta1_tenant.yaml
+tenant.capsule.clastix.io/gas created
+```
+
+You can check the tenant just created as
+
+```
+$ kubectl get tenants
+NAME   STATE    NAMESPACE QUOTA   NAMESPACE COUNT   NODE SELECTOR                  AGE
+gas    Active   3                 0                 {"kubernetes.io/os":"linux"}   25s
+```
+
+## Tenant owners
+
+Each tenant comes with a delegated user or group of users acting as the tenant admin. In the Capsule jargon, this is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant Owner.
+
+Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the the group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
+
+Assignment to a group depends on the authentication strategy in your cluster.
+
+For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
+
+Users authenticated through an _OIDC token_ must have in their token:
+
+```json
+...
+"users_groups": [
+  "capsule.clastix.io",
+  "other_group"
+]
+```
+
+The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `bob` user acting as owner of a tenant called `gas`
+
+```bash
+./hack/create-user.sh bob gas
+...
+certificatesigningrequest.certificates.k8s.io/bob-gas created
+certificatesigningrequest.certificates.k8s.io/bob-gas approved
+kubeconfig file is: bob-gas.kubeconfig
+to use it as bob export KUBECONFIG=bob-gas.kubeconfig
+```
+
+## Working with Tenants
+
+Log in to the Kubernetes cluster as `bob` tenant owner
+
+```
+$ export KUBECONFIG=bob-gas.kubeconfig
+```
+
+and create a couple of new namespaces
+
+```
+$ kubectl create namespace gas-production
+$ kubectl create namespace gas-development
+```
+
+As user `bob` you can operate with fully admin permissions:
+
+```
+$ kubectl -n gas-development run nginx --image=docker.io/nginx
+$ kubectl -n gas-development get pods
+```
+
+but limited to only your own namespaces:
+
+```
+$ kubectl -n kube-system get pods
+Error from server (Forbidden): pods is forbidden:
+User "bob" cannot list resource "pods" in API group "" in the namespace "kube-system"
+```
+
+# Removal
+
+Similar to `deploy`, you can get rid of Capsule using the `remove` target.
+
+```
+$ make remove
+```

 # Documentation

-Please, check the project [documentation](./docs/index.md) for more cool things you can do with Capsule.
+Please, check the project [documentation](https://capsule.clastix.io) for the cool things you can do with Capsule.

-# Contributions
+# Contribution

 Capsule is Open Source with Apache 2 license and any contribution is welcome.

 ## Chart Development

 The documentation for each chart is done with [helm-docs](https://github.com/norwoodj/helm-docs). This way we can ensure that values are consistent with the chart documentation.

 We have a script on the repository which will execute the helm-docs docker container, so that you don't have to worry about downloading the binary etc. Simply execute the script (Bash compatible):

 ```
 bash scripts/helm-docs.sh
 ```

 ## Community

 Join the community, share and learn from it. You can find all the resources to how to contribute code and docs, connect with people in the [community repository](https://github.com/clastix/capsule-community).

 # Governance

-You can find how the Capsule project is governed [here](https://capsule.clastix.io/docs/contributing/governance).
+Please refer to the corresponding docs:
+- [contributing.md](./docs/contributing.md) for the general guide; and
+- [dev-guide.md](./docs/dev-guide.md) for how to set up the development env to get started.

 # FAQ

 - Q. How to pronounce Capsule?

   A. It should be pronounced as `/ˈkæpsjuːl/`.
@@ -19,12 +19,9 @@ func (in *AllowedListSpec) ExactMatch(value string) (ok bool) {
 		sort.SliceStable(in.Exact, func(i, j int) bool {
 			return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
 		})
-
 		i := sort.SearchStrings(in.Exact, value)
-
 		ok = i < len(in.Exact) && in.Exact[i] == value
 	}
-
 	return
 }
@@ -32,6 +29,5 @@ func (in AllowedListSpec) RegexMatch(value string) (ok bool) {
 	if len(in.Regex) > 0 {
 		ok = regexp.MustCompile(in.Regex).MatchString(value)
 	}
-
 	return
 }
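The `ExactMatch` hunk above pairs a case-insensitive `sort.SliceStable` with `sort.SearchStrings`, which performs a binary search and therefore assumes the slice is in ascending order. A minimal, self-contained sketch of that lookup pattern (illustrative values, not repo code):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	exact := []string{"buzz", "bar", "foo", "bizz"}

	// sort.SearchStrings requires an ascending slice.
	sort.Strings(exact)

	value := "bizz"
	// i is the index of value, or the index where it would be inserted.
	i := sort.SearchStrings(exact, value)

	ok := i < len(exact) && exact[i] == value
	fmt.Println(ok) // true
}
```

One caveat worth noting: sorting with a case-insensitive comparator (as the hunk does via `strings.ToLower`) while searching with `SearchStrings`' case-sensitive ordering can disagree on mixed-case entries, which is why the sketch sorts with plain `sort.Strings`.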
@@ -15,7 +15,6 @@ func TestAllowedListSpec_ExactMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{
 			[]string{"foo", "bar", "bizz", "buzz"},
@@ -36,11 +35,9 @@ func TestAllowedListSpec_ExactMatch(t *testing.T) {
 			a := AllowedListSpec{
 				Exact: tc.In,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.ExactMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.ExactMatch(ko))
 			}
@@ -53,7 +50,6 @@ func TestAllowedListSpec_RegexMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
 		{``, nil, []string{"any", "value"}},
@@ -61,11 +57,9 @@ func TestAllowedListSpec_RegexMatch(t *testing.T) {
 			a := AllowedListSpec{
 				Regex: tc.Regex,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.RegexMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.RegexMatch(ko))
 			}
@@ -1,12 +0,0 @@
-package v1alpha1
-
-const (
-	ForbiddenNodeLabelsAnnotation            = "capsule.clastix.io/forbidden-node-labels"
-	ForbiddenNodeLabelsRegexpAnnotation      = "capsule.clastix.io/forbidden-node-labels-regexp"
-	ForbiddenNodeAnnotationsAnnotation       = "capsule.clastix.io/forbidden-node-annotations"
-	ForbiddenNodeAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-node-annotations-regexp"
-	TLSSecretNameAnnotation                  = "capsule.clastix.io/tls-secret-name"
-	MutatingWebhookConfigurationName         = "capsule.clastix.io/mutating-webhook-configuration-name"
-	ValidatingWebhookConfigurationName       = "capsule.clastix.io/validating-webhook-configuration-name"
-	EnableTLSConfigurationAnnotationName     = "capsule.clastix.io/enable-tls-configuration"
-)
@@ -7,7 +7,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-// CapsuleConfigurationSpec defines the Capsule configuration.
+// CapsuleConfigurationSpec defines the Capsule configuration
 type CapsuleConfigurationSpec struct {
 	// Names of the groups for Capsule users.
 	// +kubebuilder:default={capsule.clastix.io}
@@ -23,7 +23,7 @@ type CapsuleConfigurationSpec struct {
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:scope=Cluster

-// CapsuleConfiguration is the Schema for the Capsule configuration API.
+// CapsuleConfiguration is the Schema for the Capsule configuration API
 type CapsuleConfiguration struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -33,7 +33,7 @@ type CapsuleConfiguration struct {

 // +kubebuilder:object:root=true

-// CapsuleConfigurationList contains a list of CapsuleConfiguration.
+// CapsuleConfigurationList contains a list of CapsuleConfiguration
 type CapsuleConfigurationList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
@@ -49,13 +49,13 @@ const (
 )

 func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
-	serviceKindToAnnotationMap := map[capsulev1beta1.ProxyServiceKind][]string{
+	var serviceKindToAnnotationMap = map[capsulev1beta1.ProxyServiceKind][]string{
 		capsulev1beta1.NodesProxy:           {enableNodeListingAnnotation, enableNodeUpdateAnnotation, enableNodeDeletionAnnotation},
 		capsulev1beta1.StorageClassesProxy:  {enableStorageClassListingAnnotation, enableStorageClassUpdateAnnotation, enableStorageClassDeletionAnnotation},
 		capsulev1beta1.IngressClassesProxy:  {enableIngressClassListingAnnotation, enableIngressClassUpdateAnnotation, enableIngressClassDeletionAnnotation},
 		capsulev1beta1.PriorityClassesProxy: {enablePriorityClassListingAnnotation, enablePriorityClassUpdateAnnotation, enablePriorityClassDeletionAnnotation},
 	}
-	annotationToOperationMap := map[string]capsulev1beta1.ProxyOperation{
+	var annotationToOperationMap = map[string]capsulev1beta1.ProxyOperation{
 		enableNodeListingAnnotation:  capsulev1beta1.ListOperation,
 		enableNodeUpdateAnnotation:   capsulev1beta1.UpdateOperation,
 		enableNodeDeletionAnnotation: capsulev1beta1.DeleteOperation,
@@ -69,15 +69,14 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
 		enablePriorityClassUpdateAnnotation:   capsulev1beta1.UpdateOperation,
 		enablePriorityClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
 	}
-	annotationToOwnerKindMap := map[string]capsulev1beta1.OwnerKind{
+	var annotationToOwnerKindMap = map[string]capsulev1beta1.OwnerKind{
 		ownerUsersAnnotation:          capsulev1beta1.UserOwner,
 		ownerGroupsAnnotation:         capsulev1beta1.GroupOwner,
 		ownerServiceAccountAnnotation: capsulev1beta1.ServiceAccountOwner,
 	}

 	annotations := t.GetAnnotations()

-	operations := make(map[string]map[capsulev1beta1.ProxyServiceKind][]capsulev1beta1.ProxyOperation)
+	var operations = make(map[string]map[capsulev1beta1.ProxyServiceKind][]capsulev1beta1.ProxyOperation)

 	for serviceKind, operationAnnotations := range serviceKindToAnnotationMap {
 		for _, operationAnnotation := range operationAnnotations {
@@ -87,7 +86,6 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
 			if _, exists := operations[owner]; !exists {
 				operations[owner] = make(map[capsulev1beta1.ProxyServiceKind][]capsulev1beta1.ProxyOperation)
 			}
-
 			operations[owner][serviceKind] = append(operations[owner][serviceKind], annotationToOperationMap[operationAnnotation])
 		}
 	}
@@ -96,7 +94,7 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {

 	var owners capsulev1beta1.OwnerListSpec

-	getProxySettingsForOwner := func(ownerName string) (settings []capsulev1beta1.ProxySettings) {
+	var getProxySettingsForOwner = func(ownerName string) (settings []capsulev1beta1.ProxySettings) {
 		ownerOperations, ok := operations[ownerName]
 		if ok {
 			for k, v := range ownerOperations {
@@ -106,7 +104,6 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
 				})
 			}
 		}
-
 		return
 	}

@@ -132,13 +129,8 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
 	return owners
 }

-// nolint:gocognit,gocyclo,cyclop,maintidx
 func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
-	dst, ok := dstRaw.(*capsulev1beta1.Tenant)
-	if !ok {
-		return fmt.Errorf("expected type *capsulev1beta1.Tenant, got %T", dst)
-	}
-
+	dst := dstRaw.(*capsulev1beta1.Tenant)
 	annotations := t.GetAnnotations()

 	// ObjectMeta
@@ -149,7 +141,6 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 		if dst.Spec.NamespaceOptions == nil {
 			dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
 		}
-
 		dst.Spec.NamespaceOptions.Quota = t.Spec.NamespaceQuota
 	}

@@ -161,13 +152,11 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 		if dst.Spec.NamespaceOptions == nil {
 			dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
 		}
-
 		dst.Spec.NamespaceOptions.AdditionalMetadata = &capsulev1beta1.AdditionalMetadataSpec{
 			Labels:      t.Spec.NamespacesMetadata.AdditionalLabels,
 			Annotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
 		}
 	}
-
 	if t.Spec.ServicesMetadata != nil {
 		if dst.Spec.ServiceOptions == nil {
 			dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{
@@ -178,15 +167,13 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 			}
 		}
 	}
-
 	if t.Spec.StorageClasses != nil {
 		dst.Spec.StorageClasses = &capsulev1beta1.AllowedListSpec{
 			Exact: t.Spec.StorageClasses.Exact,
 			Regex: t.Spec.StorageClasses.Regex,
 		}
 	}
-
-	if v, annotationOk := t.Annotations[ingressHostnameCollisionScope]; annotationOk {
+	if v, ok := t.Annotations[ingressHostnameCollisionScope]; ok {
 		switch v {
 		case string(capsulev1beta1.HostnameCollisionScopeCluster), string(capsulev1beta1.HostnameCollisionScopeTenant), string(capsulev1beta1.HostnameCollisionScopeNamespace):
 			dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScope(v)
@@ -194,44 +181,38 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 			dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScopeDisabled
 		}
 	}
-
 	if t.Spec.IngressClasses != nil {
 		dst.Spec.IngressOptions.AllowedClasses = &capsulev1beta1.AllowedListSpec{
 			Exact: t.Spec.IngressClasses.Exact,
 			Regex: t.Spec.IngressClasses.Regex,
 		}
 	}
-
 	if t.Spec.IngressHostnames != nil {
 		dst.Spec.IngressOptions.AllowedHostnames = &capsulev1beta1.AllowedListSpec{
 			Exact: t.Spec.IngressHostnames.Exact,
 			Regex: t.Spec.IngressHostnames.Regex,
 		}
 	}
-
 	if t.Spec.ContainerRegistries != nil {
 		dst.Spec.ContainerRegistries = &capsulev1beta1.AllowedListSpec{
 			Exact: t.Spec.ContainerRegistries.Exact,
 			Regex: t.Spec.ContainerRegistries.Regex,
 		}
 	}
-
 	if len(t.Spec.NetworkPolicies) > 0 {
 		dst.Spec.NetworkPolicies = capsulev1beta1.NetworkPolicySpec{
 			Items: t.Spec.NetworkPolicies,
 		}
 	}
-
 	if len(t.Spec.LimitRanges) > 0 {
 		dst.Spec.LimitRanges = capsulev1beta1.LimitRangesSpec{
 			Items: t.Spec.LimitRanges,
 		}
 	}
-
 	if len(t.Spec.ResourceQuota) > 0 {
 		dst.Spec.ResourceQuota = capsulev1beta1.ResourceQuotaSpec{
 			Scope: func() capsulev1beta1.ResourceQuotaScope {
-				if v, annotationOk := t.GetAnnotations()[resourceQuotaScopeAnnotation]; annotationOk {
+				if v, ok := t.GetAnnotations()[resourceQuotaScopeAnnotation]; ok {
 					switch v {
 					case string(capsulev1beta1.ResourceQuotaScopeNamespace):
 						return capsulev1beta1.ResourceQuotaScopeNamespace
@@ -239,13 +220,11 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 						return capsulev1beta1.ResourceQuotaScopeTenant
 					}
 				}
-
 				return capsulev1beta1.ResourceQuotaScopeTenant
 			}(),
 			Items: t.Spec.ResourceQuota,
 		}
 	}
-
 	if len(t.Spec.AdditionalRoleBindings) > 0 {
 		for _, rb := range t.Spec.AdditionalRoleBindings {
 			dst.Spec.AdditionalRoleBindings = append(dst.Spec.AdditionalRoleBindings, capsulev1beta1.AdditionalRoleBindingsSpec{
@@ -254,12 +233,10 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 			})
 		}
 	}
-
 	if t.Spec.ExternalServiceIPs != nil {
 		if dst.Spec.ServiceOptions == nil {
 			dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
 		}
-
 		dst.Spec.ServiceOptions.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
 			Allowed: make([]capsulev1beta1.AllowedIP, len(t.Spec.ExternalServiceIPs.Allowed)),
 		}
@@ -279,13 +256,10 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 	priorityClasses := capsulev1beta1.AllowedListSpec{}

 	priorityClassAllowed, ok := annotations[podPriorityAllowedAnnotation]
-
 	if ok {
 		priorityClasses.Exact = strings.Split(priorityClassAllowed, ",")
 	}
-
 	priorityClassesRegexp, ok := annotations[podPriorityAllowedRegexAnnotation]
-
 	if ok {
 		priorityClasses.Regex = priorityClassesRegexp
 	}
@@ -300,15 +274,12 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableNodePortsAnnotation, t.GetName()))
 		}
-
 		if dst.Spec.ServiceOptions == nil {
 			dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
 		}
-
 		if dst.Spec.ServiceOptions.AllowedServices == nil {
 			dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
 		}
-
 		dst.Spec.ServiceOptions.AllowedServices.NodePort = pointer.BoolPtr(val)
 	}

@@ -318,15 +289,12 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableExternalNameAnnotation, t.GetName()))
 		}
-
 		if dst.Spec.ServiceOptions == nil {
 			dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
 		}
-
 		if dst.Spec.ServiceOptions.AllowedServices == nil {
 			dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
 		}
-
 		dst.Spec.ServiceOptions.AllowedServices.ExternalName = pointer.BoolPtr(val)
 	}

@@ -336,22 +304,21 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableLoadBalancerAnnotation, t.GetName()))
 		}
-
 		if dst.Spec.ServiceOptions == nil {
 			dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
 		}

 		if dst.Spec.ServiceOptions.AllowedServices == nil {
 			dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
 		}

 		dst.Spec.ServiceOptions.AllowedServices.LoadBalancer = pointer.BoolPtr(val)
 	}

 	// Status
 	dst.Status = capsulev1beta1.TenantStatus{
 		Size:       t.Status.Size,
 		Namespaces: t.Status.Namespaces,
 	}

 	// Remove unneeded annotations
 	delete(dst.ObjectMeta.Annotations, podAllowedImagePullPolicyAnnotation)
 	delete(dst.ObjectMeta.Annotations, podPriorityAllowedAnnotation)
@@ -380,15 +347,14 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 	return nil
 }

-// nolint:gocognit,gocyclo,cyclop
 func (t *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
-	ownersAnnotations := map[string][]string{
+	var ownersAnnotations = map[string][]string{
 		ownerGroupsAnnotation:         nil,
 		ownerUsersAnnotation:          nil,
 		ownerServiceAccountAnnotation: nil,
 	}

-	proxyAnnotations := map[string][]string{
+	var proxyAnnotations = map[string][]string{
 		enableNodeListingAnnotation:  nil,
 		enableNodeUpdateAnnotation:   nil,
 		enableNodeDeletionAnnotation: nil,
@@ -416,7 +382,6 @@ func (t *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
 			ownersAnnotations[ownerServiceAccountAnnotation] = append(ownersAnnotations[ownerServiceAccountAnnotation], owner.Name)
 		}
-
 		for _, setting := range owner.ProxyOperations {
 			switch setting.Kind {
 			case capsulev1beta1.NodesProxy:
@@ -472,7 +437,6 @@ func (t *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
 			t.Annotations[k] = strings.Join(v, ",")
 		}
 	}
-
 	for k, v := range proxyAnnotations {
 		if len(v) > 0 {
 			t.Annotations[k] = strings.Join(v, ",")
@@ -480,12 +444,8 @@ func (t *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
 		}
 	}
 }

-// nolint:gocyclo,cyclop
 func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
-	src, ok := srcRaw.(*capsulev1beta1.Tenant)
-	if !ok {
-		return fmt.Errorf("expected *capsulev1beta1.Tenant, got %T", srcRaw)
-	}
-
+	src := srcRaw.(*capsulev1beta1.Tenant)

 	// ObjectMeta
 	t.ObjectMeta = src.ObjectMeta
@@ -509,57 +469,47 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
 			AdditionalAnnotations: src.Spec.NamespaceOptions.AdditionalMetadata.Annotations,
 		}
 	}
-
 	if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AdditionalMetadata != nil {
 		t.Spec.ServicesMetadata = &AdditionalMetadataSpec{
 			AdditionalLabels:      src.Spec.ServiceOptions.AdditionalMetadata.Labels,
 			AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.Annotations,
 		}
 	}
-
 	if src.Spec.StorageClasses != nil {
 		t.Spec.StorageClasses = &AllowedListSpec{
 			Exact: src.Spec.StorageClasses.Exact,
 			Regex: src.Spec.StorageClasses.Regex,
 		}
 	}
-
 	t.Annotations[ingressHostnameCollisionScope] = string(src.Spec.IngressOptions.HostnameCollisionScope)
-
 	if src.Spec.IngressOptions.AllowedClasses != nil {
 		t.Spec.IngressClasses = &AllowedListSpec{
 			Exact: src.Spec.IngressOptions.AllowedClasses.Exact,
 			Regex: src.Spec.IngressOptions.AllowedClasses.Regex,
 		}
 	}
-
 	if src.Spec.IngressOptions.AllowedHostnames != nil {
 		t.Spec.IngressHostnames = &AllowedListSpec{
 			Exact: src.Spec.IngressOptions.AllowedHostnames.Exact,
 			Regex: src.Spec.IngressOptions.AllowedHostnames.Regex,
 		}
 	}
-
 	if src.Spec.ContainerRegistries != nil {
 		t.Spec.ContainerRegistries = &AllowedListSpec{
 			Exact: src.Spec.ContainerRegistries.Exact,
 			Regex: src.Spec.ContainerRegistries.Regex,
 		}
 	}
-
 	if len(src.Spec.NetworkPolicies.Items) > 0 {
 		t.Spec.NetworkPolicies = src.Spec.NetworkPolicies.Items
 	}
-
 	if len(src.Spec.LimitRanges.Items) > 0 {
 		t.Spec.LimitRanges = src.Spec.LimitRanges.Items
 	}
-
 	if len(src.Spec.ResourceQuota.Items) > 0 {
 		t.Annotations[resourceQuotaScopeAnnotation] = string(src.Spec.ResourceQuota.Scope)
 		t.Spec.ResourceQuota = src.Spec.ResourceQuota.Items
 	}
-
 	if len(src.Spec.AdditionalRoleBindings) > 0 {
 		for _, rb := range src.Spec.AdditionalRoleBindings {
 			t.Spec.AdditionalRoleBindings = append(t.Spec.AdditionalRoleBindings, AdditionalRoleBindingsSpec{
@@ -568,7 +518,6 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
 			})
 		}
 	}
-
 	if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.ExternalServiceIPs != nil {
 		t.Spec.ExternalServiceIPs = &ExternalServiceIPsSpec{
 			Allowed: make([]AllowedIP, len(src.Spec.ServiceOptions.ExternalServiceIPs.Allowed)),
@@ -578,14 +527,11 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
 			t.Spec.ExternalServiceIPs.Allowed[i] = AllowedIP(IP)
 		}
 	}
-
 	if len(src.Spec.ImagePullPolicies) != 0 {
 		var pullPolicies []string
-
 		for _, policy := range src.Spec.ImagePullPolicies {
 			pullPolicies = append(pullPolicies, string(policy))
 		}
-
 		t.Annotations[podAllowedImagePullPolicyAnnotation] = strings.Join(pullPolicies, ",")
 	}

@@ -593,24 +539,15 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
 	if len(src.Spec.PriorityClasses.Exact) != 0 {
 		t.Annotations[podPriorityAllowedAnnotation] = strings.Join(src.Spec.PriorityClasses.Exact, ",")
 	}
-
 	if src.Spec.PriorityClasses.Regex != "" {
 		t.Annotations[podPriorityAllowedRegexAnnotation] = src.Spec.PriorityClasses.Regex
 	}
 	}

 	if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AllowedServices != nil {
-		if src.Spec.ServiceOptions.AllowedServices.NodePort != nil {
-			t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
-		}
-
-		if src.Spec.ServiceOptions.AllowedServices.ExternalName != nil {
-			t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
-		}
-
-		if src.Spec.ServiceOptions.AllowedServices.LoadBalancer != nil {
-			t.Annotations[enableLoadBalancerAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.LoadBalancer)
-		}
+		t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
+		t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
+		t.Annotations[enableLoadBalancerAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.LoadBalancer)
 	}

 	// Status
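The `ConvertTo` and `ConvertFrom` hunks above swap an unchecked type assertion (`dst := dstRaw.(*capsulev1beta1.Tenant)`, which panics at runtime on a type mismatch) for the two-value, checked form that surfaces an error instead. A minimal sketch of the difference, with hypothetical stand-in types rather than the repo's:

```go
package main

import "fmt"

// Hub is a stand-in for the conversion.Hub interface.
type Hub interface{ Hub() }

type Tenant struct{ Name string }

func (Tenant) Hub() {}

// convert uses the checked, two-value assertion: a mismatch
// produces an error instead of a runtime panic.
func convert(raw Hub) (*Tenant, error) {
	t, ok := raw.(*Tenant)
	if !ok {
		return nil, fmt.Errorf("expected type *Tenant, got %T", raw)
	}
	return t, nil
}

func main() {
	// Passing a Tenant value rather than *Tenant: the checked form
	// reports the problem instead of panicking.
	_, err := convert(Tenant{})
	fmt.Println(err)
}
```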
@@ -18,14 +18,12 @@ import (
 	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
 )

-// nolint:maintidx
 func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 	var namespaceQuota int32 = 5
-
-	nodeSelector := map[string]string{
+	var nodeSelector = map[string]string{
 		"foo": "bar",
 	}
-	v1alpha1AdditionalMetadataSpec := &AdditionalMetadataSpec{
+	var v1alpha1AdditionalMetadataSpec = &AdditionalMetadataSpec{
 		AdditionalLabels: map[string]string{
 			"foo": "bar",
 		},
@@ -33,11 +31,11 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			"foo": "bar",
 		},
 	}
-	v1alpha1AllowedListSpec := &AllowedListSpec{
+	var v1alpha1AllowedListSpec = &AllowedListSpec{
 		Exact: []string{"foo", "bar"},
 		Regex: "^foo*",
 	}
-	v1beta1AdditionalMetadataSpec := &capsulev1beta1.AdditionalMetadataSpec{
+	var v1beta1AdditionalMetadataSpec = &capsulev1beta1.AdditionalMetadataSpec{
 		Labels: map[string]string{
 			"foo": "bar",
 		},
@@ -45,11 +43,11 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			"foo": "bar",
 		},
 	}
-	v1beta1NamespaceOptions := &capsulev1beta1.NamespaceOptions{
+	var v1beta1NamespaceOptions = &capsulev1beta1.NamespaceOptions{
 		Quota:              &namespaceQuota,
 		AdditionalMetadata: v1beta1AdditionalMetadataSpec,
 	}
-	v1beta1ServiceOptions := &capsulev1beta1.ServiceOptions{
+	var v1beta1ServiceOptions = &capsulev1beta1.ServiceOptions{
 		AdditionalMetadata: v1beta1AdditionalMetadataSpec,
 		AllowedServices: &capsulev1beta1.AllowedServices{
 			NodePort: pointer.BoolPtr(false),
@@ -60,11 +58,11 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
 		},
 	}
-	v1beta1AllowedListSpec := &capsulev1beta1.AllowedListSpec{
+	var v1beta1AllowedListSpec = &capsulev1beta1.AllowedListSpec{
 		Exact: []string{"foo", "bar"},
 		Regex: "^foo*",
 	}
-	networkPolicies := []networkingv1.NetworkPolicySpec{
+	var networkPolicies = []networkingv1.NetworkPolicySpec{
 		{
 			Ingress: []networkingv1.NetworkPolicyIngressRule{
 				{
@@ -89,7 +87,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			},
 		},
 	}
-	limitRanges := []corev1.LimitRangeSpec{
+	var limitRanges = []corev1.LimitRangeSpec{
 		{
 			Limits: []corev1.LimitRangeItem{
 				{
@@ -106,7 +104,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			},
 		},
 	}
-	resourceQuotas := []corev1.ResourceQuotaSpec{
+	var resourceQuotas = []corev1.ResourceQuotaSpec{
 		{
 			Hard: map[corev1.ResourceName]resource.Quantity{
 				corev1.ResourceLimitsCPU: resource.MustParse("8"),
@@ -120,7 +118,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 		},
 	}

-	v1beta1Tnt := capsulev1beta1.Tenant{
+	var v1beta1Tnt = capsulev1beta1.Tenant{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "alice",
@@ -258,7 +256,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			Subjects: []rbacv1.Subject{
 				{
 					Kind:     "Group",
-					APIGroup: rbacv1.GroupName,
+					APIGroup: "rbac.authorization.k8s.io",
 					Name:     "system:authenticated",
 				},
 			},
@@ -276,7 +274,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 		},
 	}

-	v1alpha1Tnt := Tenant{
+	var v1alpha1Tnt = Tenant{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "alice",
@@ -329,7 +327,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 			Subjects: []rbacv1.Subject{
 				{
 					Kind:     "Group",
-					APIGroup: rbacv1.GroupName,
+					APIGroup: "rbac.authorization.k8s.io",
 					Name:     "system:authenticated",
 				},
 			},
@@ -349,11 +347,10 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 }

 func TestConversionHub_ConvertTo(t *testing.T) {
-	v1beta1ConvertedTnt := capsulev1beta1.Tenant{}
-
+	var v1beta1ConvertedTnt = capsulev1beta1.Tenant{}
 	v1alpha1Tnt, v1beta1tnt := generateTenantsSpecs()
 	err := v1alpha1Tnt.ConvertTo(&v1beta1ConvertedTnt)

 	if assert.NoError(t, err) {
 		sort.Slice(v1beta1tnt.Spec.Owners, func(i, j int) bool {
 			return v1beta1tnt.Spec.Owners[i].Name < v1beta1tnt.Spec.Owners[j].Name
@@ -367,20 +364,17 @@ func TestConversionHub_ConvertTo(t *testing.T) {
 				return owner.ProxyOperations[i].Kind < owner.ProxyOperations[j].Kind
 			})
 		}
-
 		for _, owner := range v1beta1ConvertedTnt.Spec.Owners {
 			sort.Slice(owner.ProxyOperations, func(i, j int) bool {
 				return owner.ProxyOperations[i].Kind < owner.ProxyOperations[j].Kind
 			})
 		}
-
 		assert.Equal(t, v1beta1tnt, v1beta1ConvertedTnt)
 	}
 }

 func TestConversionHub_ConvertFrom(t *testing.T) {
-	v1alpha1ConvertedTnt := Tenant{}
-
+	var v1alpha1ConvertedTnt = Tenant{}
 	v1alpha1Tnt, v1beta1tnt := generateTenantsSpecs()

 	err := v1alpha1ConvertedTnt.ConvertFrom(&v1beta1tnt)
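Most pairs in this test-file hunk only trade `name := expr` for `var name = expr`. Inside a function body the two spellings are equivalent; the short form is what style linters usually prefer, which appears to be all these lines change. A two-line illustration (illustrative values, not repo code):

```go
package main

import "fmt"

func main() {
	// Equivalent declarations inside a function body; the diff
	// above merely switches between these two spellings.
	var a = map[string]string{"foo": "bar"}
	b := map[string]string{"foo": "bar"}
	fmt.Println(a["foo"] == b["foo"]) // true
}
```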
@@ -12,10 +12,10 @@ import (
 )

 var (
-	// GroupVersion is group version used to register these objects.
+	// GroupVersion is group version used to register these objects
 	GroupVersion = schema.GroupVersion{Group: "capsule.clastix.io", Version: "v1alpha1"}

-	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
 	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

 	// AddToScheme adds the types in this group-version to the given scheme.
@@ -3,7 +3,7 @@

 package v1alpha1

-// OwnerSpec defines tenant owner name and kind.
+// OwnerSpec defines tenant owner name and kind
 type OwnerSpec struct {
 	Name string `json:"name"`
 	Kind Kind   `json:"kind"`
@@ -13,7 +13,6 @@ func (t *Tenant) IsCordoned() bool {
 	if v, ok := t.Labels["capsule.clastix.io/cordon"]; ok && v == "enabled" {
 		return true
 	}
-
 	return false
 }
@@ -22,19 +21,16 @@ func (t *Tenant) IsFull() bool {
 	if t.Spec.NamespaceQuota == nil {
 		return false
 	}
-
 	return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceQuota)
 }

 func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
 	var l []string
-
 	for _, ns := range namespaces {
 		if ns.Status.Phase == corev1.NamespaceActive {
 			l = append(l, ns.GetName())
 		}
 	}
-
 	sort.Strings(l)

 	t.Status.Namespaces = l
@@ -27,6 +27,5 @@ func GetTypeLabel(t runtime.Object) (label string, err error) {
 	default:
 		err = fmt.Errorf("type %T is not mapped as Capsule label recognized", v)
 	}
-
 	return
 }
@@ -9,7 +9,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-// TenantSpec defines the desired state of Tenant.
+// TenantSpec defines the desired state of Tenant
 type TenantSpec struct {
 	Owner OwnerSpec `json:"owner"`

@@ -29,7 +29,7 @@ type TenantSpec struct {
 	ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalServiceIPs,omitempty"`
 }

-// TenantStatus defines the observed state of Tenant.
+// TenantStatus defines the observed state of Tenant
 type TenantStatus struct {
 	Size       uint     `json:"size"`
 	Namespaces []string `json:"namespaces,omitempty"`
@@ -45,7 +45,7 @@ type TenantStatus struct {
 // +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"

-// Tenant is the Schema for the tenants API.
+// Tenant is the Schema for the tenants API
 type Tenant struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -56,7 +56,7 @@ type Tenant struct {

 // +kubebuilder:object:root=true

-// TenantList contains a list of Tenant.
+// TenantList contains a list of Tenant
 type TenantList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
@@ -1,4 +1,3 @@
-//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 // Copyright 2020-2021 Clastix Labs
@@ -19,12 +19,9 @@ func (in *AllowedListSpec) ExactMatch(value string) (ok bool) {
 		sort.SliceStable(in.Exact, func(i, j int) bool {
 			return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
 		})
-
 		i := sort.SearchStrings(in.Exact, value)
-
 		ok = i < len(in.Exact) && in.Exact[i] == value
 	}
-
 	return
 }
@@ -32,6 +29,5 @@ func (in AllowedListSpec) RegexMatch(value string) (ok bool) {
 	if len(in.Regex) > 0 {
 		ok = regexp.MustCompile(in.Regex).MatchString(value)
 	}
-
 	return
 }
@@ -15,7 +15,6 @@ func TestAllowedListSpec_ExactMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{
 			[]string{"foo", "bar", "bizz", "buzz"},
@@ -36,11 +35,9 @@ func TestAllowedListSpec_ExactMatch(t *testing.T) {
 			a := AllowedListSpec{
 				Exact: tc.In,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.ExactMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.ExactMatch(ko))
 			}
@@ -53,7 +50,6 @@ func TestAllowedListSpec_RegexMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
 		{``, nil, []string{"any", "value"}},
@@ -61,11 +57,9 @@ func TestAllowedListSpec_RegexMatch(t *testing.T) {
 			a := AllowedListSpec{
 				Regex: tc.Regex,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.RegexMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.RegexMatch(ko))
 			}
@@ -1,59 +0,0 @@
-// Copyright 2020-2021 Clastix Labs
-// SPDX-License-Identifier: Apache-2.0
-
-package v1beta1
-
-import (
-	"fmt"
-	"strconv"
-)
-
-const (
-	ResourceQuotaAnnotationPrefix = "quota.resources.capsule.clastix.io"
-	ResourceUsedAnnotationPrefix  = "used.resources.capsule.clastix.io"
-)
-
-func UsedAnnotationForResource(kindGroup string) string {
-	return fmt.Sprintf("%s/%s", ResourceUsedAnnotationPrefix, kindGroup)
-}
-
-func LimitAnnotationForResource(kindGroup string) string {
-	return fmt.Sprintf("%s/%s", ResourceQuotaAnnotationPrefix, kindGroup)
-}
-
-func GetUsedResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
-	usedStr, ok := tenant.GetAnnotations()[UsedAnnotationForResource(kindGroup)]
-	if !ok {
-		usedStr = "0"
-	}
-
-	used, _ := strconv.ParseInt(usedStr, 10, 10)
-
-	return used, nil
-}
-
-type NonLimitedResourceError struct {
-	kindGroup string
-}
-
-func NewNonLimitedResourceError(kindGroup string) *NonLimitedResourceError {
-	return &NonLimitedResourceError{kindGroup: kindGroup}
-}
-
-func (n NonLimitedResourceError) Error() string {
-	return fmt.Sprintf("resource %s is not limited for the current tenant", n.kindGroup)
-}
-
-func GetLimitResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
-	limitStr, ok := tenant.GetAnnotations()[LimitAnnotationForResource(kindGroup)]
-	if !ok {
-		return 0, NewNonLimitedResourceError(kindGroup)
-	}
-
-	limit, err := strconv.ParseInt(limitStr, 10, 10)
-	if err != nil {
-		return 0, fmt.Errorf("resource %s limit cannot be parsed, %w", kindGroup, err)
-	}
-
-	return limit, nil
-}
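One detail in the removed file above: it calls `strconv.ParseInt(usedStr, 10, 10)`, where the third argument is a *bit size*, so `10` restricts results to the 10-bit signed range [-512, 511] — much narrower than the `int64` the functions return (`64` would be the usual choice). A standalone sketch of how `bitSize` behaves (illustrative, not repo code):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	// bitSize 10 limits the result to [-512, 511]: out-of-range
	// input is clamped and reported via strconv.ErrRange.
	v, err := strconv.ParseInt("1000", 10, 10)
	fmt.Println(v, errors.Is(err, strconv.ErrRange)) // 511 true

	// bitSize 64 parses the full int64 range.
	v, err = strconv.ParseInt("1000", 10, 64)
	fmt.Println(v, err) // 1000 <nil>
}
```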
@@ -11,6 +11,5 @@ func (t *Tenant) IsWildcardDenied() bool {
 	if v, ok := t.Annotations[denyWildcard]; ok && v == "true" {
 		return true
 	}
-
 	return false
 }
@@ -19,12 +19,9 @@ func (in *ForbiddenListSpec) ExactMatch(value string) (ok bool) {
 		sort.SliceStable(in.Exact, func(i, j int) bool {
 			return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
 		})
-
 		i := sort.SearchStrings(in.Exact, value)
-
 		ok = i < len(in.Exact) && in.Exact[i] == value
 	}
-
 	return
 }
@@ -32,6 +29,5 @@ func (in ForbiddenListSpec) RegexMatch(value string) (ok bool) {
 	if len(in.Regex) > 0 {
 		ok = regexp.MustCompile(in.Regex).MatchString(value)
 	}
-
 	return
 }
@@ -15,7 +15,6 @@ func TestForbiddenListSpec_ExactMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{
 			[]string{"foo", "bar", "bizz", "buzz"},
@@ -36,11 +35,9 @@ func TestForbiddenListSpec_ExactMatch(t *testing.T) {
 			a := ForbiddenListSpec{
 				Exact: tc.In,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.ExactMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.ExactMatch(ko))
 			}
@@ -53,7 +50,6 @@ func TestForbiddenListSpec_RegexMatch(t *testing.T) {
 		True  []string
 		False []string
 	}
-
 	for _, tc := range []tc{
 		{`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
 		{``, nil, []string{"any", "value"}},
@@ -61,11 +57,9 @@ func TestForbiddenListSpec_RegexMatch(t *testing.T) {
 			a := ForbiddenListSpec{
 				Regex: tc.Regex,
 			}
-
 			for _, ok := range tc.True {
 				assert.True(t, a.RegexMatch(ok))
 			}
-
 			for _, ko := range tc.False {
 				assert.False(t, a.RegexMatch(ko))
 			}
@@ -12,10 +12,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// GroupVersion is group version used to register these objects.
|
||||
// GroupVersion is group version used to register these objects
|
||||
GroupVersion = schema.GroupVersion{Group: "capsule.clastix.io", Version: "v1beta1"}
|
||||
|
||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
|
||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
||||
|
||||
// AddToScheme adds the types in this group-version to the given scheme.
|
||||
|
||||
@@ -14,11 +14,9 @@ func (t *Tenant) hasForbiddenNamespaceLabelsAnnotations() bool {
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceLabelsAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -26,11 +24,9 @@ func (t *Tenant) hasForbiddenNamespaceAnnotationsAnnotations() bool {
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -38,7 +34,6 @@ func (t *Tenant) ForbiddenUserNamespaceLabels() *ForbiddenListSpec {
|
||||
if !t.hasForbiddenNamespaceLabelsAnnotations() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ForbiddenListSpec{
|
||||
Exact: strings.Split(t.Annotations[ForbiddenNamespaceLabelsAnnotation], ","),
|
||||
Regex: t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation],
|
||||
@@ -49,7 +44,6 @@ func (t *Tenant) ForbiddenUserNamespaceAnnotations() *ForbiddenListSpec {
|
||||
if !t.hasForbiddenNamespaceAnnotationsAnnotations() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ForbiddenListSpec{
|
||||
Exact: strings.Split(t.Annotations[ForbiddenNamespaceAnnotationsAnnotation], ","),
|
||||
Regex: t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation],
|
||||
|
||||
@@ -15,7 +15,6 @@ func (o OwnerListSpec) FindOwner(name string, kind OwnerKind) (owner OwnerSpec)
|
||||
if i < len(o) && o[i].Kind == kind && o[i].Name == name {
|
||||
return o[i]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -24,15 +23,12 @@ type ByKindAndName OwnerListSpec
|
||||
func (b ByKindAndName) Len() int {
|
||||
return len(b)
|
||||
}
|
||||
|
||||
func (b ByKindAndName) Less(i, j int) bool {
|
||||
if b[i].Kind.String() != b[j].Kind.String() {
|
||||
return b[i].Kind.String() < b[j].Kind.String()
|
||||
}
|
||||
|
||||
return b[i].Name < b[j].Name
|
||||
}
|
||||
|
||||
func (b ByKindAndName) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
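
`FindOwner` depends on this `ByKindAndName` ordering: the index `i` checked in the hunk above presumably comes from a binary search over the sorted list. A restatement of that contract (hypothetical helper, shown only to illustrate why `Less` compares kind before name):

```go
import "sort"

// findUser sketches the FindOwner contract for the UserOwner kind:
// sort with ByKindAndName, then binary-search the (kind, name) pair.
func findUser(owners OwnerListSpec, name string) (OwnerSpec, bool) {
	sort.Sort(ByKindAndName(owners))

	i := sort.Search(len(owners), func(i int) bool {
		if owners[i].Kind.String() != UserOwner.String() {
			return owners[i].Kind.String() >= UserOwner.String()
		}

		return owners[i].Name >= name
	})

	if i < len(owners) && owners[i].Kind == UserOwner && owners[i].Name == name {
		return owners[i], true
	}

	return OwnerSpec{}, false
}
```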
@@ -7,7 +7,7 @@ import (
)

func TestOwnerListSpec_FindOwner(t *testing.T) {
	bla := OwnerSpec{
	var bla = OwnerSpec{
		Kind: UserOwner,
		Name: "bla",
		ProxyOperations: []ProxySettings{
@@ -17,7 +17,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	bar := OwnerSpec{
	var bar = OwnerSpec{
		Kind: GroupOwner,
		Name: "bar",
		ProxyOperations: []ProxySettings{
@@ -27,7 +27,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	baz := OwnerSpec{
	var baz = OwnerSpec{
		Kind: UserOwner,
		Name: "baz",
		ProxyOperations: []ProxySettings{
@@ -37,7 +37,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	fim := OwnerSpec{
	var fim = OwnerSpec{
		Kind: ServiceAccountOwner,
		Name: "fim",
		ProxyOperations: []ProxySettings{
@@ -47,7 +47,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	bom := OwnerSpec{
	var bom = OwnerSpec{
		Kind: GroupOwner,
		Name: "bom",
		ProxyOperations: []ProxySettings{
@@ -61,7 +61,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	qip := OwnerSpec{
	var qip = OwnerSpec{
		Kind: ServiceAccountOwner,
		Name: "qip",
		ProxyOperations: []ProxySettings{
@@ -71,7 +71,7 @@ func TestOwnerListSpec_FindOwner(t *testing.T) {
			},
		},
	}
	owners := OwnerListSpec{bom, qip, bla, bar, baz, fim}
	var owners = OwnerListSpec{bom, qip, bla, bar, baz, fim}

	assert.Equal(t, owners.FindOwner("bom", GroupOwner), bom)
	assert.Equal(t, owners.FindOwner("qip", ServiceAccountOwner), qip)

@@ -1,48 +0,0 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

import (
	"fmt"
	"strings"
)

const (
	ClusterRoleNamesAnnotation = "clusterrolenames.capsule.clastix.io"
)

// GetRoles read the annotation available in the Tenant specification and if it matches the pattern
// clusterrolenames.capsule.clastix.io/${KIND}.${NAME} returns the associated roles.
// Kubernetes annotations and labels must respect RFC 1123 about DNS names and this could be cumbersome in two cases:
// 1. identifying users based on their email address
// 2. the overall length of the annotation key that is exceeding 63 characters
// For emails, the symbol @ can be replaced with the placeholder __AT__.
// For the latter one, the index of the owner can be used to force the retrieval.
func (in OwnerSpec) GetRoles(tenant Tenant, index int) []string {
	for key, value := range tenant.GetAnnotations() {
		if !strings.HasPrefix(key, fmt.Sprintf("%s/", ClusterRoleNamesAnnotation)) {
			continue
		}

		for symbol, replace := range in.convertMap() {
			key = strings.ReplaceAll(key, symbol, replace)
		}

		nameBased := key == fmt.Sprintf("%s/%s.%s", ClusterRoleNamesAnnotation, strings.ToLower(in.Kind.String()), strings.ToLower(in.Name))

		indexBased := key == fmt.Sprintf("%s/%d", ClusterRoleNamesAnnotation, index)

		if nameBased || indexBased {
			return strings.Split(value, ",")
		}
	}

	return []string{"admin", "capsule-namespace-deleter"}
}

func (in OwnerSpec) convertMap() map[string]string {
	return map[string]string{
		"__AT__": "@",
	}
}
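
Concretely, the `__AT__` placeholder described in the doc comment works as follows (hypothetical values; `exampleRoles` is not part of the codebase):

```go
// exampleRoles shows the removed GetRoles contract in action. "@" is not a
// legal character in an annotation key, so it is stored as "__AT__" and
// rewritten back before matching against the owner's name.
func exampleRoles() []string {
	tenant := Tenant{}
	tenant.SetAnnotations(map[string]string{
		"clusterrolenames.capsule.clastix.io/user.alice__AT__example.com": "editor,viewer",
	})

	owner := OwnerSpec{Kind: UserOwner, Name: "alice@example.com"}

	// Returns []string{"editor", "viewer"} instead of the default
	// {"admin", "capsule-namespace-deleter"} pair.
	return owner.GetRoles(tenant, 0)
}
```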
@@ -5,7 +5,6 @@ package v1beta1

import (
	"fmt"
	"strings"
)

const (
@@ -19,13 +18,12 @@ const (
	ForbiddenNamespaceLabelsRegexpAnnotation      = "capsule.clastix.io/forbidden-namespace-labels-regexp"
	ForbiddenNamespaceAnnotationsAnnotation       = "capsule.clastix.io/forbidden-namespace-annotations"
	ForbiddenNamespaceAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-namespace-annotations-regexp"
	ProtectedTenantAnnotation                     = "capsule.clastix.io/protected"
)

func UsedQuotaFor(resource fmt.Stringer) string {
	return "quota.capsule.clastix.io/used-" + strings.ReplaceAll(resource.String(), "/", "_")
	return "quota.capsule.clastix.io/used-" + resource.String()
}

func HardQuotaFor(resource fmt.Stringer) string {
	return "quota.capsule.clastix.io/hard-" + strings.ReplaceAll(resource.String(), "/", "_")
	return "quota.capsule.clastix.io/hard-" + resource.String()
}
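
The `strings.ReplaceAll` form matters because an annotation key may contain only a single `/` (the prefix separator), while some quota resource names carry a slash of their own. Illustration (hypothetical value):

```go
// An extended-resource quota name that itself contains a slash.
res := corev1.ResourceName("requests.nvidia.com/gpu")

// Without the replacement:
//   quota.capsule.clastix.io/used-requests.nvidia.com/gpu  -> two slashes, rejected by the API server
// With strings.ReplaceAll(..., "/", "_"):
//   quota.capsule.clastix.io/used-requests.nvidia.com_gpu  -> a legal annotation key
fmt.Println(UsedQuotaFor(res))
```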
@@ -13,7 +13,6 @@ func (t *Tenant) IsCordoned() bool {
	if v, ok := t.Labels["capsule.clastix.io/cordon"]; ok && v == "enabled" {
		return true
	}

	return false
}

@@ -22,19 +21,16 @@ func (t *Tenant) IsFull() bool {
	if t.Spec.NamespaceOptions == nil || t.Spec.NamespaceOptions.Quota == nil {
		return false
	}

	return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceOptions.Quota)
}

func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
	var l []string

	for _, ns := range namespaces {
		if ns.Status.Phase == corev1.NamespaceActive {
			l = append(l, ns.GetName())
		}
	}

	sort.Strings(l)

	t.Status.Namespaces = l
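
`AssignNamespaces` keeps `Status.Namespaces` as a sorted list of Active namespaces only, and that list is what the `IsFull` count above is compared against. A small illustration (the helper and values are hypothetical):

```go
// namespaceWithPhase builds a minimal Namespace object for the example.
func namespaceWithPhase(name string, phase corev1.NamespacePhase) corev1.Namespace {
	ns := corev1.Namespace{}
	ns.SetName(name)
	ns.Status.Phase = phase

	return ns
}

func exampleAssign() {
	tenant := &Tenant{}
	tenant.AssignNamespaces([]corev1.Namespace{
		namespaceWithPhase("oil-prod", corev1.NamespaceActive),
		namespaceWithPhase("oil-old", corev1.NamespaceTerminating), // dropped
		namespaceWithPhase("oil-dev", corev1.NamespaceActive),
	})

	// tenant.Status.Namespaces == []string{"oil-dev", "oil-prod"}
}
```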

@@ -27,6 +27,5 @@ func GetTypeLabel(t runtime.Object) (label string, err error) {
	default:
		err = fmt.Errorf("type %T is not mapped as Capsule label recognized", v)
	}

	return
}

@@ -11,7 +11,7 @@ const (
	TenantStateCordoned tenantState = "Cordoned"
)

// Returns the observed state of the Tenant.
// Returns the observed state of the Tenant
type TenantStatus struct {
	//+kubebuilder:default=Active
	// The operational state of the Tenant. Possible values are "Active", "Cordoned".

@@ -7,7 +7,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TenantSpec defines the desired state of Tenant.
// TenantSpec defines the desired state of Tenant
type TenantSpec struct {
	// Specifies the owners of the Tenant. Mandatory.
	Owners OwnerListSpec `json:"owners"`
@@ -21,11 +21,11 @@ type TenantSpec struct {
	IngressOptions IngressOptions `json:"ingressOptions,omitempty"`
	// Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
	ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
	// Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
	// Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
	NetworkPolicies NetworkPolicySpec `json:"networkPolicies,omitempty"`
	// Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
	// Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
	LimitRanges LimitRangesSpec `json:"limitRanges,omitempty"`
	// Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
	ResourceQuota ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
@@ -47,7 +47,7 @@ type TenantSpec struct {
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"

// Tenant is the Schema for the tenants API.
// Tenant is the Schema for the tenants API
type Tenant struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -60,7 +60,7 @@ func (t *Tenant) Hub() {}

//+kubebuilder:object:root=true

// TenantList contains a list of Tenant.
// TenantList contains a list of Tenant
type TenantList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Copyright 2020-2021 Clastix Labs
@@ -269,21 +268,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NonLimitedResourceError) DeepCopyInto(out *NonLimitedResourceError) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonLimitedResourceError.
func (in *NonLimitedResourceError) DeepCopy() *NonLimitedResourceError {
	if in == nil {
		return nil
	}
	out := new(NonLimitedResourceError)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in OwnerListSpec) DeepCopyInto(out *OwnerListSpec) {
	{
[binary image changed: 29 KiB before, 29 KiB after]
@@ -21,4 +21,3 @@
.idea/
*.tmproj
.vscode/
README.md.gotmpl

@@ -21,8 +21,8 @@ sources:

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.11
version: 0.1.3

# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.1.2
appVersion: 0.1.0

@@ -1,9 +0,0 @@
docs: HELMDOCS_VERSION := v1.8.1
docs: docker
	@docker run --rm -v "$$(pwd):/helm-docs" -u $$(id -u) jnorwood/helm-docs:$(HELMDOCS_VERSION)

docker:
	@hash docker 2>/dev/null || {\
		echo "You need docker" &&\
		exit 1;\
	}
@@ -24,19 +24,23 @@ The Capsule Operator Chart can be used to instantly deploy the Capsule Operator

        $ helm repo add clastix https://clastix.github.io/charts

2. Install the Chart:
2. Create the Namespace:

        $ helm install capsule clastix/capsule -n capsule-system --create-namespace
        $ kubectl create namespace capsule-system

3. Show the status:
3. Install the Chart:

        $ helm install capsule clastix/capsule -n capsule-system

4. Show the status:

        $ helm status capsule -n capsule-system

4. Upgrade the Chart
5. Upgrade the Chart

        $ helm upgrade capsule clastix/capsule -n capsule-system

5. Uninstall the Chart
6. Uninstall the Chart

        $ helm uninstall capsule -n capsule-system

@@ -54,101 +58,47 @@ The values in your overrides file `myvalues.yaml` will override their counterpart

If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:

        $ helm install capsule capsule-helm-chart --set manager.options.forceTenantPrefix=false -n capsule-system
        $ helm install capsule capsule-helm-chart --set force_tenant_prefix=false -n capsule-system

Here the values you can override:

### General Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Set affinity rules for the Capsule pod |
| certManager.generateCertificates | bool | `false` | Specifies whether capsule webhooks certificates should be generated using cert-manager |
| customAnnotations | object | `{}` | Additional annotations which will be added to all resources created by Capsule helm chart |
| customLabels | object | `{}` | Additional labels which will be added to all resources created by Capsule helm chart |
| jobs.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy of the helm chart job |
| jobs.image.repository | string | `"quay.io/clastix/kubectl"` | Set the image repository of the helm chart job |
| jobs.image.tag | string | `""` | Set the image tag of the helm chart job |
| mutatingWebhooksTimeoutSeconds | int | `30` | Timeout in seconds for mutating webhooks |
| nodeSelector | object | `{}` | Set the node selector for the Capsule pod |
| podAnnotations | object | `{}` | Annotations to add to the capsule pod. |
| podSecurityPolicy.enabled | bool | `false` | Specify if a Pod Security Policy must be created |
| priorityClassName | string | `""` | Set the priority class name of the Capsule pod |
| replicaCount | int | `1` | Set the replica count for capsule pod |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `"capsule"` | The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template |
| tls.create | bool | `true` | When cert-manager is disabled, Capsule will generate the TLS certificate for webhook and CRDs conversion. |
| tls.enableController | bool | `true` | Start the Capsule controller that injects the CA into mutating and validating webhooks, and CRD as well. |
| tls.name | string | `""` | Override name of the Capsule TLS Secret name when externally managed. |
| tolerations | list | `[]` | Set list of tolerations for the Capsule pod |
| validatingWebhooksTimeoutSeconds | int | `30` | Timeout in seconds for validating webhooks |

### Manager Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| manager.hostNetwork | bool | `false` | Specifies if the container should be started in hostNetwork mode. Required for use in some managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working |
| manager.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy. |
| manager.image.repository | string | `"clastix/capsule"` | Set the image repository of the capsule. |
| manager.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. |
| manager.imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private images registry. |
| manager.kind | string | `"Deployment"` | Set the controller deployment mode as `Deployment` or `DaemonSet`. |
| manager.livenessProbe | object | `{"httpGet":{"path":"/healthz","port":10080}}` | Configure the liveness probe using Deployment probe spec |
| manager.options.capsuleUserGroups | list | `["capsule.clastix.io"]` | Override the Capsule user groups |
| manager.options.forceTenantPrefix | bool | `false` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash |
| manager.options.generateCertificates | bool | `true` | Specifies whether capsule webhooks certificates should be generated by capsule operator |
| manager.options.logLevel | string | `"4"` | Set the log verbosity of the capsule with a value from 1 to 10 |
| manager.options.protectedNamespaceRegex | string | `""` | If specified, disallows creation of namespaces matching the passed regexp |
| manager.readinessProbe | object | `{"httpGet":{"path":"/readyz","port":10080}}` | Configure the readiness probe using Deployment probe spec |
| manager.resources.limits.cpu | string | `"200m"` | |
| manager.resources.limits.memory | string | `"128Mi"` | |
| manager.resources.requests.cpu | string | `"200m"` | |
| manager.resources.requests.memory | string | `"128Mi"` | |

### ServiceMonitor Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| serviceMonitor.annotations | object | `{}` | Assign additional Annotations |
| serviceMonitor.enabled | bool | `false` | Enable ServiceMonitor |
| serviceMonitor.endpoint.interval | string | `"15s"` | Set the scrape interval for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.metricRelabelings | list | `[]` | Set metricRelabelings for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.relabelings | list | `[]` | Set relabelings for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.scrapeTimeout | string | `""` | Set the scrape timeout for the endpoint of the serviceMonitor |
| serviceMonitor.labels | object | `{}` | Assign additional labels according to Prometheus' serviceMonitorSelector matching labels |
| serviceMonitor.matchLabels | object | `{}` | Change matching labels |
| serviceMonitor.namespace | string | `""` | Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one) |
| serviceMonitor.serviceAccount.name | string | `"capsule"` | ServiceAccount for Metrics RBAC |
| serviceMonitor.serviceAccount.namespace | string | `"capsule-system"` | ServiceAccount Namespace for Metrics RBAC |
| serviceMonitor.targetLabels | list | `[]` | Set targetLabels for the serviceMonitor |

### Webhook Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| webhooks.cordoning.failurePolicy | string | `"Fail"` | |
| webhooks.cordoning.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.cordoning.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.ingresses.failurePolicy | string | `"Fail"` | |
| webhooks.ingresses.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.ingresses.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.namespaceOwnerReference.failurePolicy | string | `"Fail"` | |
| webhooks.namespaces.failurePolicy | string | `"Fail"` | |
| webhooks.networkpolicies.failurePolicy | string | `"Fail"` | |
| webhooks.networkpolicies.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.networkpolicies.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.nodes.failurePolicy | string | `"Fail"` | |
| webhooks.persistentvolumeclaims.failurePolicy | string | `"Fail"` | |
| webhooks.persistentvolumeclaims.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.persistentvolumeclaims.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.pods.failurePolicy | string | `"Fail"` | |
| webhooks.pods.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.pods.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.services.failurePolicy | string | `"Fail"` | |
| webhooks.services.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.services.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.tenants.failurePolicy | string | `"Fail"` | |
Parameter | Description | Default
--- | --- | ---
`manager.hostNetwork` | Specifies if the container should be started in `hostNetwork` mode. | `false`
`manager.options.logLevel` | Set the log verbosity of the controller with a value from 1 to 10.| `4`
`manager.options.forceTenantPrefix` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash | `false`
`manager.options.capsuleUserGroups` | Override the Capsule user groups | `[capsule.clastix.io]`
`manager.options.protectedNamespaceRegex` | If specified, disallows creation of namespaces matching the passed regexp | `null`
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
`manager.image.tag` | Overrides the image tag whose default is the chart. `appVersion` | `null`
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
`manager.livenessProbe` | Configure the liveness probe using Deployment probe spec | `GET :10080/healthz`
`manager.readinessProbe` | Configure the readiness probe using Deployment probe spec | `GET :10080/readyz`
`manager.resources.requests/cpu` | Set the CPU requests assigned to the controller. | `200m`
`manager.resources.requests/memory` | Set the memory requests assigned to the controller. | `128Mi`
`manager.resources.limits/cpu` | Set the CPU limits assigned to the controller. | `200m`
`manager.resources.limits/cpu` | Set the memory limits assigned to the controller. | `128Mi`
`mutatingWebhooksTimeoutSeconds` | Timeout in seconds for mutating webhooks. | `30`
`validatingWebhooksTimeoutSeconds` | Timeout in seconds for validating webhooks. | `30`
`imagePullSecrets` | Configuration for `imagePullSecrets` so that you can use a private images registry. | `[]`
`serviceAccount.create` | Specifies whether a service account should be created. | `true`
`serviceAccount.annotations` | Annotations to add to the service account. | `{}`
`serviceAccount.name` | The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template | `capsule`
`podAnnotations` | Annotations to add to the Capsule pod. | `{}`
`priorityClassName` | Set the priority class name of the Capsule pod. | `null`
`nodeSelector` | Set the node selector for the Capsule pod. | `{}`
`tolerations` | Set list of tolerations for the Capsule pod. | `[]`
`replicaCount` | Set the replica count for Capsule pod. | `1`
`affinity` | Set affinity rules for the Capsule pod. | `{}`
`podSecurityPolicy.enabled` | Specify if a Pod Security Policy must be created. | `false`
`serviceMonitor.enabled` | Specifies if a service monitor must be created. | `false`
`serviceMonitor.labels` | Additional labels which will be added to service monitor. | `{}`
`serviceMonitor.annotations` | Additional annotations which will be added to service monitor. | `{}`
`serviceMonitor.matchLabels` | Additional matchLabels which will be added to service monitor. | `{}`
`serviceMonitor.serviceAccount.name` | Specifies service account name for metrics scrape. | `capsule`
`serviceMonitor.serviceAccount.namespace` | Specifies service account namespace for metrics scrape. | `capsule-system`
`customLabels` | Additional labels which will be added to all resources created by Capsule helm chart . | `{}`
`customAnnotations` | Additional annotations which will be added to all resources created by Capsule helm chart . | `{}`

## Created resources

@@ -160,7 +110,6 @@ This Helm Chart creates the following Kubernetes resources in the release namesp
* CA Secret
* Certificate Secret
* Tenant Custom Resource Definition
* CapsuleConfiguration Custom Resource Definition
* MutatingWebHookConfiguration
* ValidatingWebHookConfiguration
* RBAC Cluster Roles
@@ -178,34 +127,6 @@ And optionally, depending on the values set:

Capsule, as many other add-ons, defines its own set of Custom Resource Definitions (CRDs). Helm3 removed the old CRDs installation method for a more simple methodology. In the Helm Chart, there is now a special directory called `crds` to hold the CRDs. These CRDs are not templated, but will be installed by default when running a `helm install` for the chart. If the CRDs already exist (for example, you already executed `helm install`), it will be skipped with a warning. When you wish to skip the CRDs installation, and do not see the warning, you can pass the `--skip-crds` flag to the `helm install` command.

## Cert-Manager integration

You can enable the generation of certificates using `cert-manager` as follows.

```
helm upgrade --install capsule clastix/capsule --namespace capsule-system --create-namespace \
  --set "certManager.generateCertificates=true" \
  --set "tls.create=false" \
  --set "tls.enableController=false"
```

With the usage of `tls.enableController=false` value, you're delegating the injection of the Validating and Mutating Webhooks' CA to `cert-manager`.
Since Helm3 doesn't allow to template _CRDs_, you have to patch manually the Custom Resource Definition `tenants.capsule.clastix.io` adding the proper annotation (YMMV).

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.5.0
    cert-manager.io/inject-ca-from: capsule-system/capsule-webhook-cert
  creationTimestamp: "2022-07-22T08:32:51Z"
  generation: 45
  name: tenants.capsule.clastix.io
  resourceVersion: "9832"
  uid: 61e287df-319b-476d-88d5-bdb8dc14d4a6
```

## More

See Capsule [tutorial](https://github.com/clastix/capsule/blob/master/docs/content/general/tutorial.md) for more information about how to use Capsule.
See Capsule [use cases](https://github.com/clastix/capsule/blob/master/use_cases.md) for more information about how to use Capsule.
@@ -1,160 +0,0 @@
# Deploying the Capsule Operator

Use the Capsule Operator for easily implementing, managing, and maintaining multitenancy and access control in Kubernetes.

## Requirements

* [Helm 3](https://github.com/helm/helm/releases) is required when installing the Capsule Operator chart. Follow Helm’s official [steps](https://helm.sh/docs/intro/install/) for installing helm on your particular operating system.

* A Kubernetes cluster 1.16+ with following [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled:

  * PodNodeSelector
  * LimitRanger
  * ResourceQuota
  * MutatingAdmissionWebhook
  * ValidatingAdmissionWebhook

* A [`kubeconfig`](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file accessing the Kubernetes cluster with cluster admin permissions.

## Quick Start

The Capsule Operator Chart can be used to instantly deploy the Capsule Operator on your Kubernetes cluster.

1. Add this repository:

        $ helm repo add clastix https://clastix.github.io/charts

2. Install the Chart:

        $ helm install capsule clastix/capsule -n capsule-system --create-namespace

3. Show the status:

        $ helm status capsule -n capsule-system

4. Upgrade the Chart

        $ helm upgrade capsule clastix/capsule -n capsule-system

5. Uninstall the Chart

        $ helm uninstall capsule -n capsule-system

## Customize the installation

There are two methods for specifying overrides of values during chart installation: `--values` and `--set`.

The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.

Specify your overrides file when you install the chart:

        $ helm install capsule capsule-helm-chart --values myvalues.yaml -n capsule-system

The values in your overrides file `myvalues.yaml` will override their counterparts in the chart’s values.yaml file. Any values in `values.yaml` that weren’t overridden will keep their defaults.

If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:

        $ helm install capsule capsule-helm-chart --set manager.options.forceTenantPrefix=false -n capsule-system

Here the values you can override:


### General Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if not (or (hasPrefix "manager" .Key) (hasPrefix "serviceMonitor" .Key) (hasPrefix "webhook" .Key) (hasPrefix "capsule-proxy" .Key) ) }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Manager Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "manager" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### ServiceMonitor Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "serviceMonitor" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Webhook Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "webhook" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

## Created resources

This Helm Chart creates the following Kubernetes resources in the release namespace:

* Capsule Namespace
* Capsule Operator Deployment
* Capsule Service
* CA Secret
* Certificate Secret
* Tenant Custom Resource Definition
* CapsuleConfiguration Custom Resource Definition
* MutatingWebHookConfiguration
* ValidatingWebHookConfiguration
* RBAC Cluster Roles
* Metrics Service

And optionally, depending on the values set:

* Capsule ServiceAccount
* Capsule Service Monitor
* PodSecurityPolicy
* RBAC ClusterRole and RoleBinding for pod security policy
* RBAC Role and Rolebinding for metrics scrape

## Notes on installing Custom Resource Definitions with Helm3

Capsule, as many other add-ons, defines its own set of Custom Resource Definitions (CRDs). Helm3 removed the old CRDs installation method for a more simple methodology. In the Helm Chart, there is now a special directory called `crds` to hold the CRDs. These CRDs are not templated, but will be installed by default when running a `helm install` for the chart. If the CRDs already exist (for example, you already executed `helm install`), it will be skipped with a warning. When you wish to skip the CRDs installation, and do not see the warning, you can pass the `--skip-crds` flag to the `helm install` command.

## Cert-Manager integration

You can enable the generation of certificates using `cert-manager` as follows.

```
helm upgrade --install capsule clastix/capsule --namespace capsule-system --create-namespace \
  --set "certManager.generateCertificates=true" \
  --set "tls.create=false" \
  --set "tls.enableController=false"
```

With the usage of `tls.enableController=false` value, you're delegating the injection of the Validating and Mutating Webhooks' CA to `cert-manager`.
Since Helm3 doesn't allow to template _CRDs_, you have to patch manually the Custom Resource Definition `tenants.capsule.clastix.io` adding the proper annotation (YMMV).

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.5.0
    cert-manager.io/inject-ca-from: capsule-system/capsule-webhook-cert
  creationTimestamp: "2022-07-22T08:32:51Z"
  generation: 45
  name: tenants.capsule.clastix.io
  resourceVersion: "9832"
  uid: 61e287df-319b-476d-88d5-bdb8dc14d4a6
```

## More

See Capsule [tutorial](https://github.com/clastix/capsule/blob/master/docs/content/general/tutorial.md) for more information about how to use Capsule.
@@ -17,7 +17,7 @@ spec:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: CapsuleConfiguration is the Schema for the Capsule configuration API.
        description: CapsuleConfiguration is the Schema for the Capsule configuration API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -28,7 +28,7 @@ spec:
          metadata:
            type: object
          spec:
            description: CapsuleConfigurationSpec defines the Capsule configuration.
            description: CapsuleConfigurationSpec defines the Capsule configuration
            properties:
              forceTenantPrefix:
                default: false

@@ -7,17 +7,7 @@ metadata:
  name: tenants.capsule.clastix.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          name: capsule-webhook-service
          namespace: capsule-system
          path: /convert
          port: 443
      conversionReviewVersions:
      - v1alpha1
      - v1beta1
    strategy: None
  group: capsule.clastix.io
  names:
    kind: Tenant
@@ -56,7 +46,7 @@ spec:
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: Tenant is the Schema for the tenants API.
        description: Tenant is the Schema for the tenants API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -67,7 +57,7 @@ spec:
          metadata:
            type: object
          spec:
            description: TenantSpec defines the desired state of Tenant.
            description: TenantSpec defines the desired state of Tenant
            properties:
              additionalRoleBindings:
                items:
@@ -485,7 +475,7 @@ spec:
                      type: string
                    type: object
                  owner:
                    description: OwnerSpec defines tenant owner name and kind.
                    description: OwnerSpec defines tenant owner name and kind
                    properties:
                      kind:
                        enum:
@@ -568,7 +558,7 @@ spec:
            - owner
            type: object
          status:
            description: TenantStatus defines the observed state of Tenant.
            description: TenantStatus defines the observed state of Tenant
            properties:
              namespaces:
                items:
@@ -608,7 +598,7 @@ spec:
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: Tenant is the Schema for the tenants API.
        description: Tenant is the Schema for the tenants API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -619,7 +609,7 @@ spec:
          metadata:
            type: object
          spec:
            description: TenantSpec defines the desired state of Tenant.
            description: TenantSpec defines the desired state of Tenant
            properties:
              additionalRoleBindings:
                description: Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
@@ -707,7 +697,7 @@ spec:
                      type: string
                    type: object
              limitRanges:
                description: Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
                description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
                properties:
                  items:
                    items:
@@ -1065,7 +1055,7 @@ spec:
              nodeSelector:
                additionalProperties:
                  type: string
                description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
                description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
                type: object
              owners:
                description: Specifies the owners of the Tenant. Mandatory.
@@ -1234,7 +1224,7 @@ spec:
            - owners
            type: object
          status:
            description: Returns the observed state of the Tenant.
            description: Returns the observed state of the Tenant
            properties:
              namespaces:
                description: List of namespaces assigned to the Tenant.
@@ -5,7 +5,7 @@


# Check the capsule logs
$ kubectl logs -f deployment/{{ template "capsule.fullname" . }}-controller-manager -c manager -n {{ .Release.Namespace }}
$ kubectl logs -f deployment/{{ template "capsule.fullname" . }}-controller-manager -c manager -n{{ .Release.Namespace }}

- Manage this chart:


@@ -65,6 +65,7 @@ ServiceAccount annotations
{{- end }}
{{- end }}


{{/*
Create the name of the service account to use
*/}}
@@ -90,38 +91,30 @@ Create the proxy fully-qualified Docker image to use
{{- printf "%s:%s" .Values.proxy.image.repository .Values.proxy.image.tag -}}
{{- end }}

{{/*
Determine the Kubernetes version to use for jobsFullyQualifiedDockerImage tag
*/}}
{{- define "capsule.jobsTagKubeVersion" -}}
{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
{{- else }}
{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
{{- end }}
{{- end }}

{{/*
Create the jobs fully-qualified Docker image to use
*/}}
{{- define "capsule.jobsFullyQualifiedDockerImage" -}}
{{- if .Values.jobs.image.tag }}
{{- printf "%s:%s" .Values.jobs.image.repository .Values.jobs.image.tag -}}
{{- else }}
{{- printf "%s:%s" .Values.jobs.image.repository (include "capsule.jobsTagKubeVersion" .) -}}
{{- end }}
{{- end }}

{{/*
Create the Capsule controller name to use
Create the Capsule Deployment name to use
*/}}
{{- define "capsule.controllerName" -}}
{{- define "capsule.deploymentName" -}}
{{- printf "%s-controller-manager" (include "capsule.fullname" .) -}}
{{- end }}

{{/*
Create the Capsule CA Secret name to use
*/}}
{{- define "capsule.secretCaName" -}}
{{- printf "%s-ca" (include "capsule.fullname" .) -}}
{{- end }}

{{/*
Create the Capsule TLS Secret name to use
*/}}
{{- define "capsule.secretTlsName" -}}
{{ default ( printf "%s-tls" ( include "capsule.fullname" . ) ) .Values.tls.name }}
{{- printf "%s-tls" (include "capsule.fullname" .) -}}
{{- end }}
charts/capsule/templates/ca.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  name: {{ include "capsule.secretCaName" . }}
data:
@@ -1,36 +0,0 @@
{{- if .Values.certManager.generateCertificates }}
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ include "capsule.fullname" . }}-webhook-selfsigned
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ include "capsule.fullname" . }}-webhook-cert
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  dnsNames:
  - {{ include "capsule.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc
  - {{ include "capsule.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc.cluster.local
  issuerRef:
    kind: Issuer
    name: {{ include "capsule.fullname" . }}-webhook-selfsigned
  secretName: {{ include "capsule.secretTlsName" . }}
  subject:
    organizations:
    - clastix.io
{{- end }}
@@ -1,4 +1,3 @@
{{- if or (not .Values.certManager.generateCertificates) (.Values.tls.create) }}
apiVersion: v1
kind: Secret
metadata:
@@ -9,4 +8,4 @@ metadata:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  name: {{ include "capsule.secretTlsName" . }}
{{- end }}
data:
@@ -4,12 +4,8 @@ metadata:
  name: default
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  annotations:
    capsule.clastix.io/mutating-webhook-configuration-name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
    capsule.clastix.io/tls-secret-name: {{ include "capsule.secretTlsName" . }}
    capsule.clastix.io/validating-webhook-configuration-name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
    capsule.clastix.io/enable-tls-configuration: "{{ .Values.tls.enableController }}"
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
@@ -1,88 +0,0 @@
{{- if eq .Values.manager.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "capsule.controllerName" . }}
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      {{- include "capsule.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "capsule.labels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "capsule.serviceAccountName" . }}
      {{- if .Values.manager.hostNetwork }}
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      {{- end }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
      - name: cert
        secret:
          defaultMode: 420
          secretName: {{ include "capsule.secretTlsName" . }}
      containers:
      - name: manager
        command:
        - /manager
        args:
        - --enable-leader-election
        - --zap-log-level={{ default 4 .Values.manager.options.logLevel }}
        - --configuration-name=default
        image: {{ include "capsule.managerFullyQualifiedDockerImage" . }}
        imagePullPolicy: {{ .Values.manager.image.pullPolicy }}
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: webhook-server
          containerPort: 9443
          protocol: TCP
        - name: metrics
          containerPort: 8080
          protocol: TCP
        livenessProbe:
          {{- toYaml .Values.manager.livenessProbe | nindent 12}}
        readinessProbe:
          {{- toYaml .Values.manager.readinessProbe | nindent 12}}
        volumeMounts:
        - mountPath: /tmp/k8s-webhook-server/serving-certs
          name: cert
          readOnly: true
        resources:
          {{- toYaml .Values.manager.resources | nindent 12 }}
        securityContext:
          allowPrivilegeEscalation: false
{{- end }}
@@ -1,8 +1,7 @@
{{- if eq .Values.manager.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "capsule.controllerName" . }}
  name: {{ include "capsule.deploymentName" . }}
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  {{- with .Values.customAnnotations }}
@@ -30,7 +29,6 @@ spec:
      serviceAccountName: {{ include "capsule.serviceAccountName" . }}
      {{- if .Values.manager.hostNetwork }}
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      {{- end }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- with .Values.nodeSelector }}
@@ -49,7 +47,7 @@ spec:
      - name: cert
        secret:
          defaultMode: 420
          secretName: {{ include "capsule.secretTlsName" . }}
          secretName: {{ include "capsule.fullname" . }}-tls
      containers:
      - name: manager
        command:
@@ -84,4 +82,3 @@ spec:
          {{- toYaml .Values.manager.resources | nindent 12 }}
        securityContext:
          allowPrivilegeEscalation: false
{{- end }}
@@ -4,11 +4,8 @@ metadata:
  name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
  labels:
    {{- include "capsule.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.certManager.generateCertificates }}
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "capsule.fullname" . }}-webhook-cert
    {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
webhooks:
@@ -16,9 +13,7 @@ webhooks:
    - v1
    - v1beta1
    clientConfig:
      {{- if not .Values.certManager.generateCertificates }}
      caBundle: Cg==
      {{- end }}
      service:
        name: {{ include "capsule.fullname" . }}-webhook-service
        namespace: {{ .Release.Namespace }}
@@ -1,5 +1,4 @@
{{- if .Values.tls.create }}
{{- $cmd := printf "while [ -z $$(kubectl -n $NAMESPACE get secret %s -o jsonpath='{.data.tls\\\\.crt}') ];" (include "capsule.secretTlsName" .) -}}
{{- $cmd := "while [ -z $$(kubectl -n $NAMESPACE get secret capsule-tls -o jsonpath='{.data.tls\\\\.crt}') ];" -}}
{{- $cmd = printf "%s do echo 'waiting Capsule to be up and running...' && sleep 5;" $cmd -}}
{{- $cmd = printf "%s done" $cmd -}}
apiVersion: batch/v1
@@ -26,14 +25,6 @@ spec:
        app.kubernetes.io/instance: {{ .Release.Name | quote }}
        helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      restartPolicy: Never
      containers:
      - name: post-install-job
@@ -45,5 +36,4 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
      serviceAccountName: {{ include "capsule.serviceAccountName" . }}
{{- end }}
      serviceAccountName: {{ include "capsule.serviceAccountName" . }}
@@ -1,7 +1,5 @@
{{- $cmd := ""}}
{{- if or (.Values.tls.create) (.Values.certManager.generateCertificates) }}
{{- $cmd = printf "%s kubectl delete secret -n $NAMESPACE %s --ignore-not-found &&" $cmd (include "capsule.secretTlsName" .) -}}
{{- end }}
{{- $cmd := printf "kubectl scale deployment -n $NAMESPACE %s --replicas 0 &&" (include "capsule.deploymentName" .) -}}
{{- $cmd = printf "%s kubectl delete secret -n $NAMESPACE %s %s --ignore-not-found &&" $cmd (include "capsule.secretTlsName" .) (include "capsule.secretCaName" .) -}}
{{- $cmd = printf "%s kubectl delete clusterroles.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found &&" $cmd -}}
{{- $cmd = printf "%s kubectl delete clusterrolebindings.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found" $cmd -}}
apiVersion: batch/v1
@@ -28,14 +26,6 @@ spec:
        app.kubernetes.io/instance: {{ .Release.Name | quote }}
        helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      restartPolicy: Never
      containers:
      - name: pre-delete-job
@@ -15,33 +15,17 @@ metadata:
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
{{- with .Values.serviceMonitor.endpoint }}
|
||||
- interval: {{ .interval }}
|
||||
- interval: 15s
|
||||
port: metrics
|
||||
path: /metrics
|
||||
{{- with .scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .metricRelabelings }}
|
||||
metricRelabelings: {{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .relabelings }}
|
||||
relabelings: {{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
jobLabel: app.kubernetes.io/name
|
||||
{{- with .Values.serviceMonitor.targetLabels }}
|
||||
targetLabels: {{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- if .Values.serviceMonitor.matchLabels }}
|
||||
{{- toYaml .Values.serviceMonitor.matchLabels | nindent 6 }}
|
||||
{{- else }}
|
||||
{{- include "capsule.labels" . | nindent 6 }}
|
||||
{{- include "capsule.labels" . | nindent 6 }}
|
||||
{{- with .Values.serviceMonitor.matchLabels }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
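For reference, this is the kind of values fragment the endpoint-aware ServiceMonitor template above consumes. It is an illustrative sketch assembled from the chart defaults shown later in this diff; the scrapeTimeout value is an assumption chosen for the example:

serviceMonitor:
  enabled: true
  endpoint:
    # Scrape interval rendered into the endpoint
    interval: "15s"
    # Optional; the template omits the field when empty
    scrapeTimeout: "10s"
    # Optional relabeling rules, emitted only when non-empty
    metricRelabelings: []
    relabelings: []
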
@@ -4,11 +4,8 @@ metadata:
name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
labels:
{{- include "capsule.labels" . | nindent 4 }}
annotations:
{{- if .Values.certManager.generateCertificates }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "capsule.fullname" . }}-webhook-cert
{{- end }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
webhooks:
@@ -16,9 +13,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -48,9 +43,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -81,9 +74,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -112,9 +103,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -143,9 +132,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -173,9 +160,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -201,9 +186,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -232,9 +215,7 @@ webhooks:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
@@ -259,31 +240,3 @@ webhooks:
scope: '*'
sideEffects: None
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
{{- if not .Values.certManager.generateCertificates }}
caBundle: Cg==
{{- end }}
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
path: /nodes
port: 443
failurePolicy: {{ .Values.webhooks.nodes.failurePolicy }}
name: nodes.capsule.clastix.io
matchPolicy: Exact
namespaceSelector: {}
objectSelector: {}
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}

@@ -2,59 +2,29 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Secret Options
tls:
# -- Start the Capsule controller that injects the CA into mutating and validating webhooks, and CRD as well.
enableController: true
# -- When cert-manager is disabled, Capsule will generate the TLS certificate for webhook and CRDs conversion.
create: true
# -- Override name of the Capsule TLS Secret name when externally managed.
name: ""

# Manager Options
manager:

# -- Set the controller deployment mode as `Deployment` or `DaemonSet`.
kind: Deployment

image:
# -- Set the image repository of the capsule.
repository: clastix/capsule
# -- Set the image pull policy.
repository: quay.io/clastix/capsule
pullPolicy: IfNotPresent
# -- Overrides the image tag whose default is the chart appVersion.
tag: ''

# -- Configuration for `imagePullSecrets` so that you can use a private images registry.
imagePullSecrets: []

# -- Specifies if the container should be started in hostNetwork mode.
# Specifies if the container should be started in hostNetwork mode.
#
# Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
# CNI (such as calico), because control-plane managed by AWS cannot communicate
# with pods' IP CIDR and admission webhooks are not working
hostNetwork: false

# Additional Capsule Controller Options
# Additional Capsule options
options:
# -- Set the log verbosity of the capsule with a value from 1 to 10
logLevel: '4'
# -- Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash
forceTenantPrefix: false
# -- Override the Capsule user groups
capsuleUserGroups: ["capsule.clastix.io"]
# -- If specified, disallows creation of namespaces matching the passed regexp
protectedNamespaceRegex: ""
# -- Specifies whether capsule webhooks certificates should be generated by capsule operator
generateCertificates: true

# -- Configure the liveness probe using Deployment probe spec
livenessProbe:
httpGet:
path: /healthz
port: 10080

# -- Configure the readiness probe using Deployment probe spec
readinessProbe:
httpGet:
path: /readyz
@@ -67,63 +37,46 @@ manager:
requests:
cpu: 200m
memory: 128Mi

# -- Annotations to add to the capsule pod.
jobs:
image:
repository: quay.io/clastix/kubectl
pullPolicy: IfNotPresent
tag: "v1.20.7"
imagePullSecrets: []
serviceAccount:
create: true
annotations: {}
name: "capsule"
podAnnotations: {}
# The following annotations guarantee scheduling for critical add-on pods
# podAnnotations:
# scheduler.alpha.kubernetes.io/critical-pod: ''

# -- Set the priority class name of the Capsule pod
priorityClassName: '' #system-cluster-critical

# -- Set the node selector for the Capsule pod
nodeSelector: {}
# node-role.kubernetes.io/master: ""

# -- Set list of tolerations for the Capsule pod
tolerations: []
#- key: CriticalAddonsOnly
# operator: Exists
#- effect: NoSchedule
# key: node-role.kubernetes.io/master

# -- Set the replica count for capsule pod
replicaCount: 1

# -- Set affinity rules for the Capsule pod
affinity: {}

podSecurityPolicy:
# -- Specify if a Pod Security Policy must be created
enabled: false

jobs:
image:
# -- Set the image repository of the helm chart job
repository: quay.io/clastix/kubectl
# -- Set the image pull policy of the helm chart job
pullPolicy: IfNotPresent
# -- Set the image tag of the helm chart job
tag: ""

# ServiceAccount
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Annotations to add to the service account.
serviceMonitor:
enabled: false
# Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
namespace: ''
# Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
labels: {}
annotations: {}
# -- The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template
name: "capsule"
matchLabels: {}
serviceAccount:
name: capsule
namespace: capsule-system

certManager:
# -- Specifies whether capsule webhooks certificates should be generated using cert-manager
generateCertificates: false

# -- Additional labels which will be added to all resources created by Capsule helm chart
# Additional labels
customLabels: {}

# -- Additional annotations which will be added to all resources created by Capsule helm chart
# Additional annotations
customAnnotations: {}

# Webhooks configurations
@@ -170,39 +123,5 @@ webhooks:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
nodes:
failurePolicy: Fail

# -- Timeout in seconds for mutating webhooks
mutatingWebhooksTimeoutSeconds: 30
# -- Timeout in seconds for validating webhooks
validatingWebhooksTimeoutSeconds: 30

# ServiceMonitor
serviceMonitor:
# -- Enable ServiceMonitor
enabled: false
# -- Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
namespace: ''
# -- Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
labels: {}
# -- Assign additional Annotations
annotations: {}
# -- Change matching labels
matchLabels: {}
# -- Set targetLabels for the serviceMonitor
targetLabels: []
serviceAccount:
# -- ServiceAccount for Metrics RBAC
name: capsule
# -- ServiceAccount Namespace for Metrics RBAC
namespace: capsule-system
endpoint:
# -- Set the scrape interval for the endpoint of the serviceMonitor
interval: "15s"
# -- Set the scrape timeout for the endpoint of the serviceMonitor
scrapeTimeout: ""
# -- Set metricRelabelings for the endpoint of the serviceMonitor
metricRelabelings: []
# -- Set relabelings for the endpoint of the serviceMonitor
relabelings: []
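As a quick illustration of the restructured values above (the side of the comparison carrying the `# --` annotated defaults), a minimal override file could look like this; the failurePolicy value is an assumption chosen for the example:

# values-override.yaml (sketch)
webhooks:
  nodes:
    # Relax the nodes webhook, e.g. in disposable lab clusters
    failurePolicy: Ignore
certManager:
  # Delegate webhook certificate management to cert-manager
  generateCertificates: true
serviceMonitor:
  enabled: true
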
@@ -19,7 +19,7 @@ spec:
- name: v1alpha1
schema:
openAPIV3Schema:
description: CapsuleConfiguration is the Schema for the Capsule configuration API.
description: CapsuleConfiguration is the Schema for the Capsule configuration API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -30,7 +30,7 @@ spec:
metadata:
type: object
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration.
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
forceTenantPrefix:
default: false

@@ -46,7 +46,7 @@ spec:
name: v1alpha1
schema:
openAPIV3Schema:
description: Tenant is the Schema for the tenants API.
description: Tenant is the Schema for the tenants API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -57,7 +57,7 @@ spec:
metadata:
type: object
spec:
description: TenantSpec defines the desired state of Tenant.
description: TenantSpec defines the desired state of Tenant
properties:
additionalRoleBindings:
items:
@@ -475,7 +475,7 @@ spec:
type: string
type: object
owner:
description: OwnerSpec defines tenant owner name and kind.
description: OwnerSpec defines tenant owner name and kind
properties:
kind:
enum:
@@ -558,7 +558,7 @@ spec:
- owner
type: object
status:
description: TenantStatus defines the observed state of Tenant.
description: TenantStatus defines the observed state of Tenant
properties:
namespaces:
items:
@@ -598,7 +598,7 @@ spec:
name: v1beta1
schema:
openAPIV3Schema:
description: Tenant is the Schema for the tenants API.
description: Tenant is the Schema for the tenants API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -609,7 +609,7 @@ spec:
metadata:
type: object
spec:
description: TenantSpec defines the desired state of Tenant.
description: TenantSpec defines the desired state of Tenant
properties:
additionalRoleBindings:
description: Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
@@ -697,7 +697,7 @@ spec:
type: string
type: object
limitRanges:
description: Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
properties:
items:
items:
@@ -1055,7 +1055,7 @@ spec:
nodeSelector:
additionalProperties:
type: string
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
type: object
owners:
description: Specifies the owners of the Tenant. Mandatory.
@@ -1224,7 +1224,7 @@ spec:
- owners
type: object
status:
description: Returns the observed state of the Tenant.
description: Returns the observed state of the Tenant
properties:
namespaces:
description: List of namespaces assigned to the Tenant.

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
# label selector used by Grafana to load the dashboards from Config Maps
grafana_dashboard: "1"
name: capsule-grafana-dashboard
@@ -1,8 +0,0 @@
configMapGenerator:
- name: capsule-grafana-dashboard
files:
- dashboard.json
generatorOptions:
disableNameSuffixHash: true
patchesStrategicMerge:
- dashboard.yaml
@@ -24,7 +24,7 @@ spec:
- name: v1alpha1
schema:
openAPIV3Schema:
description: CapsuleConfiguration is the Schema for the Capsule configuration API.
description: CapsuleConfiguration is the Schema for the Capsule configuration API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -35,7 +35,7 @@ spec:
metadata:
type: object
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration.
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
forceTenantPrefix:
default: false
@@ -118,7 +118,7 @@ spec:
name: v1alpha1
schema:
openAPIV3Schema:
description: Tenant is the Schema for the tenants API.
description: Tenant is the Schema for the tenants API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -129,7 +129,7 @@ spec:
metadata:
type: object
spec:
description: TenantSpec defines the desired state of Tenant.
description: TenantSpec defines the desired state of Tenant
properties:
additionalRoleBindings:
items:
@@ -547,7 +547,7 @@ spec:
type: string
type: object
owner:
description: OwnerSpec defines tenant owner name and kind.
description: OwnerSpec defines tenant owner name and kind
properties:
kind:
enum:
@@ -630,7 +630,7 @@ spec:
- owner
type: object
status:
description: TenantStatus defines the observed state of Tenant.
description: TenantStatus defines the observed state of Tenant
properties:
namespaces:
items:
@@ -670,7 +670,7 @@ spec:
name: v1beta1
schema:
openAPIV3Schema:
description: Tenant is the Schema for the tenants API.
description: Tenant is the Schema for the tenants API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
@@ -681,7 +681,7 @@ spec:
metadata:
type: object
spec:
description: TenantSpec defines the desired state of Tenant.
description: TenantSpec defines the desired state of Tenant
properties:
additionalRoleBindings:
description: Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
@@ -769,7 +769,7 @@ spec:
type: string
type: object
limitRanges:
description: Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
properties:
items:
items:
@@ -1127,7 +1127,7 @@ spec:
nodeSelector:
additionalProperties:
type: string
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
type: object
owners:
description: Specifies the owners of the Tenant. Mandatory.
@@ -1296,7 +1296,7 @@ spec:
- owners
type: object
status:
description: Returns the observed state of the Tenant.
description: Returns the observed state of the Tenant
properties:
namespaces:
description: List of namespaces assigned to the Tenant.
@@ -1411,7 +1411,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: clastix/capsule:v0.1.2
image: quay.io/clastix/capsule:v0.1.1-rc0
imagePullPolicy: IfNotPresent
name: manager
ports:
@@ -1582,29 +1582,6 @@ webhooks:
- networkpolicies
scope: Namespaced
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: capsule-webhook-service
namespace: capsule-system
path: /nodes
failurePolicy: Fail
name: nodes.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:

@@ -6,5 +6,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: clastix/capsule
newTag: v0.1.2
newName: quay.io/clastix/capsule
newTag: v0.1.1-rc0

@@ -118,25 +118,6 @@ webhooks:
resources:
- networkpolicies
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /nodes
failurePolicy: Fail
name: nodes.capsule.clastix.io
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:

@@ -34,12 +34,6 @@
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
- op: add
path: /webhooks/7/namespaceSelector
value:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
- op: add
path: /webhooks/0/rules/0/scope
value: Namespaced
@@ -49,12 +43,12 @@
- op: add
path: /webhooks/3/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/4/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/5/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/6/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/7/rules/0/scope
value: Namespaced

@@ -9,11 +9,13 @@ import (
"github.com/go-logr/logr"
"github.com/pkg/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
"github.com/clastix/capsule/controllers/utils"
"github.com/clastix/capsule/pkg/configuration"
)

@@ -22,23 +24,44 @@ type Manager struct {
Client client.Client
}

// InjectClient injects the Client interface, required by the Runnable interface.
// InjectClient injects the Client interface, required by the Runnable interface
func (c *Manager) InjectClient(client client.Client) error {
c.Client = client

return nil
}

func filterByName(objName, desired string) bool {
return objName == desired
}

func forOptionPerInstanceName(instanceName string) builder.ForOption {
return builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return filterByName(event.Object.GetName(), instanceName)
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return filterByName(deleteEvent.Object.GetName(), instanceName)
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return filterByName(updateEvent.ObjectNew.GetName(), instanceName)
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return filterByName(genericEvent.Object.GetName(), instanceName)
},
})
}

func (c *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
return ctrl.NewControllerManagedBy(mgr).
For(&capsulev1alpha1.CapsuleConfiguration{}, utils.NamesMatchingPredicate(configurationName)).
For(&capsulev1alpha1.CapsuleConfiguration{}, forOptionPerInstanceName(configurationName)).
Complete(c)
}

func (c *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
c.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)

cfg := configuration.NewCapsuleConfiguration(ctx, c.Client, request.Name)
cfg := configuration.NewCapsuleConfiguration(c.Client, request.Name)
// Validating the Capsule Configuration options
if _, err = cfg.ProtectedNamespaceRegexp(); err != nil {
panic(errors.Wrap(err, "Invalid configuration for protected Namespace regex"))

@@ -23,7 +23,7 @@ var (
{
APIGroups: []string{""},
Resources: []string{"namespaces"},
Verbs: []string{"create", "patch"},
Verbs: []string{"create"},
},
},
},
@@ -35,7 +35,7 @@ var (
{
APIGroups: []string{""},
Resources: []string{"namespaces"},
Verbs: []string{"delete"},
Verbs: []string{"delete", "patch"},
},
},
},
@@ -48,7 +48,7 @@ var (
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: ProvisionerRoleName,
APIGroup: rbacv1.GroupName,
APIGroup: "rbac.authorization.k8s.io",
},
}
)
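For orientation, the provisioner rules above render to a ClusterRole roughly like the following on the issues/451 side of the comparison (a sketch; the object name is taken from the pre-delete job shown earlier in this diff):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: capsule-namespace-provisioner
rules:
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["create"]
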
@@ -10,19 +10,20 @@ import (
"github.com/go-logr/logr"
"github.com/hashicorp/go-multierror"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"

capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
"github.com/clastix/capsule/controllers/utils"
"github.com/clastix/capsule/pkg/configuration"
)

@@ -32,40 +33,65 @@ type Manager struct {
Configuration configuration.Configuration
}

// InjectClient injects the Client interface, required by the Runnable interface.
// InjectClient injects the Client interface, required by the Runnable interface
func (r *Manager) InjectClient(c client.Client) error {
r.Client = c

return nil
}

func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, configurationName string) (err error) {
namesPredicate := utils.NamesMatchingPredicate(ProvisionerRoleName, DeleterRoleName)
func (r *Manager) filterByNames(name string) bool {
return name == ProvisionerRoleName || name == DeleterRoleName
}

//nolint:dupl
func (r *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) (err error) {
crErr := ctrl.NewControllerManagedBy(mgr).
For(&rbacv1.ClusterRole{}, namesPredicate).
For(&rbacv1.ClusterRole{}, builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return r.filterByNames(event.Object.GetName())
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return r.filterByNames(deleteEvent.Object.GetName())
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return r.filterByNames(updateEvent.ObjectNew.GetName())
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return r.filterByNames(genericEvent.Object.GetName())
},
})).
Complete(r)
if crErr != nil {
err = multierror.Append(err, crErr)
}

crbErr := ctrl.NewControllerManagedBy(mgr).
For(&rbacv1.ClusterRoleBinding{}, namesPredicate).
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return r.filterByNames(event.Object.GetName())
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return r.filterByNames(deleteEvent.Object.GetName())
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return r.filterByNames(updateEvent.ObjectNew.GetName())
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return r.filterByNames(genericEvent.Object.GetName())
},
})).
Watches(source.NewKindWithCache(&capsulev1alpha1.CapsuleConfiguration{}, mgr.GetCache()), handler.Funcs{
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
if updateEvent.ObjectNew.GetName() == configurationName {
if crbErr := r.EnsureClusterRoleBindings(ctx); crbErr != nil {
if crbErr := r.EnsureClusterRoleBindings(); crbErr != nil {
r.Log.Error(err, "cannot update ClusterRoleBinding upon CapsuleConfiguration update")
}
}
},
}).
Complete(r)

if crbErr != nil {
err = multierror.Append(err, crbErr)
}

return
}

@@ -74,19 +100,18 @@ func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, config
func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
switch request.Name {
case ProvisionerRoleName:
if err = r.EnsureClusterRole(ctx, ProvisionerRoleName); err != nil {
if err = r.EnsureClusterRole(ProvisionerRoleName); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRole failed", "ClusterRole", ProvisionerRoleName)

break
}

if err = r.EnsureClusterRoleBindings(ctx); err != nil {
if err = r.EnsureClusterRoleBindings(); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRoleBindings failed")

break
}
case DeleterRoleName:
if err = r.EnsureClusterRole(ctx, DeleterRoleName); err != nil {
if err = r.EnsureClusterRole(DeleterRoleName); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRole failed", "ClusterRole", DeleterRoleName)
}
}
@@ -94,14 +119,14 @@ func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
return
}

func (r *Manager) EnsureClusterRoleBindings(ctx context.Context) (err error) {
func (r *Manager) EnsureClusterRoleBindings() (err error) {
crb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: ProvisionerRoleName,
},
}

_, err = controllerutil.CreateOrUpdate(ctx, r.Client, crb, func() (err error) {
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, crb, func() (err error) {
crb.RoleRef = provisionerClusterRoleBinding.RoleRef

crb.Subjects = []rbacv1.Subject{}
@@ -119,7 +144,7 @@ func (r *Manager) EnsureClusterRoleBindings(ctx context.Context) (err error) {
return
}

func (r *Manager) EnsureClusterRole(ctx context.Context, roleName string) (err error) {
func (r *Manager) EnsureClusterRole(roleName string) (err error) {
role, ok := clusterRoles[roleName]
if !ok {
return fmt.Errorf("clusterRole %s is not mapped", roleName)
@@ -131,9 +156,8 @@ func (r *Manager) EnsureClusterRole(ctx context.Context, roleName string) (err e
},
}

_, err = controllerutil.CreateOrUpdate(ctx, r.Client, clusterRole, func() error {
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, clusterRole, func() error {
clusterRole.Rules = role.Rules

return nil
})

@@ -146,9 +170,8 @@ func (r *Manager) EnsureClusterRole(ctx context.Context, roleName string) (err e
func (r *Manager) Start(ctx context.Context) error {
for roleName := range clusterRoles {
r.Log.Info("setting up ClusterRoles", "ClusterRole", roleName)

if err := r.EnsureClusterRole(ctx, roleName); err != nil {
if apierrors.IsAlreadyExists(err) {
if err := r.EnsureClusterRole(roleName); err != nil {
if errors.IsAlreadyExists(err) {
continue
}

@@ -157,9 +180,8 @@ func (r *Manager) Start(ctx context.Context) error {
}

r.Log.Info("setting up ClusterRoleBindings")

if err := r.EnsureClusterRoleBindings(ctx); err != nil {
if apierrors.IsAlreadyExists(err) {
if err := r.EnsureClusterRoleBindings(); err != nil {
if errors.IsAlreadyExists(err) {
return nil
}

212
controllers/secret/ca.go
Normal file
@@ -0,0 +1,212 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package secret

import (
"bytes"
"context"
"errors"
"time"

"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/clastix/capsule/pkg/cert"
)

type CAReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Namespace string
}

func (r *CAReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, forOptionPerInstanceName(caSecretName)).
Complete(r)
}

// By default helm doesn't allow to use templates in CRD (https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#method-1-let-helm-do-it-for-you).
// In order to overcome this, we are setting conversion strategy in helm chart to None, and then update it with CA and namespace information.
func (r *CAReconciler) UpdateCustomResourceDefinition(caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
crd := &apiextensionsv1.CustomResourceDefinition{}
err = r.Get(context.TODO(), types.NamespacedName{Name: "tenants.capsule.clastix.io"}, crd)
if err != nil {
r.Log.Error(err, "cannot retrieve CustomResourceDefinition")
return err
}

_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, crd, func() error {
crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
Strategy: "Webhook",
Webhook: &apiextensionsv1.WebhookConversion{
ClientConfig: &apiextensionsv1.WebhookClientConfig{
Service: &apiextensionsv1.ServiceReference{
Namespace: r.Namespace,
Name: "capsule-webhook-service",
Path: pointer.StringPtr("/convert"),
Port: pointer.Int32Ptr(443),
},
CABundle: caBundle,
},
ConversionReviewVersions: []string{"v1alpha1", "v1beta1"},
},
}

return nil
})

return err
})
}

//nolint:dupl
func (r CAReconciler) UpdateValidatingWebhookConfiguration(caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
vw := &admissionregistrationv1.ValidatingWebhookConfiguration{}
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-validating-webhook-configuration"}, vw)
if err != nil {
r.Log.Error(err, "cannot retrieve ValidatingWebhookConfiguration")
return err
}
for i, w := range vw.Webhooks {
// Updating CABundle only in case of an internal service reference
if w.ClientConfig.Service != nil {
vw.Webhooks[i].ClientConfig.CABundle = caBundle
}
}
return r.Update(context.TODO(), vw, &client.UpdateOptions{})
})
}

//nolint:dupl
func (r CAReconciler) UpdateMutatingWebhookConfiguration(caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
mw := &admissionregistrationv1.MutatingWebhookConfiguration{}
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-mutating-webhook-configuration"}, mw)
if err != nil {
r.Log.Error(err, "cannot retrieve MutatingWebhookConfiguration")
return err
}
for i, w := range mw.Webhooks {
// Updating CABundle only in case of an internal service reference
if w.ClientConfig.Service != nil {
mw.Webhooks[i].ClientConfig.CABundle = caBundle
}
}
return r.Update(context.TODO(), mw, &client.UpdateOptions{})
})
}

func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
var err error

r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
r.Log.Info("Reconciling CA Secret")

// Fetch the CA instance
instance := &corev1.Secret{}
err = r.Client.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}

var ca cert.CA
var rq time.Duration
ca, err = getCertificateAuthority(r.Client, r.Namespace)
if err != nil && errors.Is(err, MissingCaError{}) {
ca, err = cert.GenerateCertificateAuthority()
if err != nil {
return reconcile.Result{}, err
}
} else if err != nil {
return reconcile.Result{}, err
}

r.Log.Info("Handling CA Secret")

rq, err = ca.ExpiresIn(time.Now())
if err != nil {
r.Log.Info("CA is expired, cleaning to obtain a new one")
instance.Data = map[string][]byte{}
} else {
r.Log.Info("Updating CA secret with new PEM and RSA")

var crt *bytes.Buffer
var key *bytes.Buffer
crt, _ = ca.CACertificatePem()
key, _ = ca.CAPrivateKeyPem()

instance.Data = map[string][]byte{
certSecretKey: crt.Bytes(),
privateKeySecretKey: key.Bytes(),
}

group := new(errgroup.Group)
group.Go(func() error {
return r.UpdateMutatingWebhookConfiguration(crt.Bytes())
})
group.Go(func() error {
return r.UpdateValidatingWebhookConfiguration(crt.Bytes())
})
group.Go(func() error {
return r.UpdateCustomResourceDefinition(crt.Bytes())
})

if err = group.Wait(); err != nil {
return reconcile.Result{}, err
}
}

var res controllerutil.OperationResult
t := &corev1.Secret{ObjectMeta: instance.ObjectMeta}
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() error {
t.Data = instance.Data
return nil
})
if err != nil {
r.Log.Error(err, "cannot update Capsule TLS")
return reconcile.Result{}, err
}

if res == controllerutil.OperationResultUpdated {
r.Log.Info("Capsule CA has been updated, we need to trigger TLS update too")
tls := &corev1.Secret{}
err = r.Get(ctx, types.NamespacedName{
Namespace: r.Namespace,
Name: tlsSecretName,
}, tls)
if err != nil {
r.Log.Error(err, "Capsule TLS Secret missing")
}
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, tls, func() error {
tls.Data = map[string][]byte{}
return nil
})
return err
})
if err != nil {
r.Log.Error(err, "Cannot clean Capsule TLS Secret due to CA update")
return reconcile.Result{}, err
}
}

r.Log.Info("Reconciliation completed, processing back in " + rq.String())
return reconcile.Result{Requeue: true, RequeueAfter: rq}, nil
}
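To visualize the patch that UpdateCustomResourceDefinition applies at runtime (the workaround for Helm's no-templates-in-CRDs limitation described in the comment above), the resulting conversion stanza on the Tenant CRD looks roughly like this; the namespace and CA bundle are runtime values, shown here as placeholders:

spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: capsule-system   # r.Namespace at runtime
          name: capsule-webhook-service
          path: /convert
          port: 443
        caBundle: <base64 CA certificate injected from the capsule-ca Secret>
      conversionReviewVersions: ["v1alpha1", "v1beta1"]
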
12
controllers/secret/const.go
Normal file
@@ -0,0 +1,12 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package secret

const (
certSecretKey = "tls.crt"
privateKeySecretKey = "tls.key"

caSecretName = "capsule-ca"
tlsSecretName = "capsule-tls"
)
11
controllers/secret/errors.go
Normal file
@@ -0,0 +1,11 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package secret

type MissingCaError struct {
}

func (MissingCaError) Error() string {
return "CA has not been created yet, please generate a new"
}
62
controllers/secret/reconciler.go
Normal file
@@ -0,0 +1,62 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package secret

import (
"context"
"fmt"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"

"github.com/clastix/capsule/pkg/cert"
)

func getCertificateAuthority(client client.Client, namespace string) (ca cert.CA, err error) {
instance := &corev1.Secret{}

err = client.Get(context.TODO(), types.NamespacedName{
Namespace: namespace,
Name: caSecretName,
}, instance)
if err != nil {
return nil, fmt.Errorf("missing secret %s, cannot reconcile", caSecretName)
}

if instance.Data == nil {
return nil, MissingCaError{}
}

ca, err = cert.NewCertificateAuthorityFromBytes(instance.Data[certSecretKey], instance.Data[privateKeySecretKey])
if err != nil {
return
}

return
}

func forOptionPerInstanceName(instanceName string) builder.ForOption {
return builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return filterByName(event.Object.GetName(), instanceName)
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return filterByName(deleteEvent.Object.GetName(), instanceName)
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return filterByName(updateEvent.ObjectNew.GetName(), instanceName)
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return filterByName(genericEvent.Object.GetName(), instanceName)
},
})
}

func filterByName(objName, desired string) bool {
return objName == desired
}
152
controllers/secret/tls.go
Normal file
@@ -0,0 +1,152 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package secret

import (
"bytes"
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"time"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/clastix/capsule/pkg/cert"
)

type TLSReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Namespace string
}

func (r *TLSReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, forOptionPerInstanceName(tlsSecretName)).
Complete(r)
}

func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
var err error

r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
r.Log.Info("Reconciling TLS Secret")

// Fetch the Secret instance
instance := &corev1.Secret{}
err = r.Get(ctx, request.NamespacedName, instance)
if err != nil {
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}

var ca cert.CA
var rq time.Duration

ca, err = getCertificateAuthority(r.Client, r.Namespace)
if err != nil {
return reconcile.Result{}, err
}

var shouldCreate bool
for _, key := range []string{certSecretKey, privateKeySecretKey} {
if _, ok := instance.Data[key]; !ok {
shouldCreate = true
break
}
}

if shouldCreate {
r.Log.Info("Missing Capsule TLS certificate")
rq = 6 * 30 * 24 * time.Hour

opts := cert.NewCertOpts(time.Now().Add(rq), fmt.Sprintf("capsule-webhook-service.%s.svc", r.Namespace))
var crt, key *bytes.Buffer
crt, key, err = ca.GenerateCertificate(opts)
if err != nil {
r.Log.Error(err, "Cannot generate new TLS certificate")
return reconcile.Result{}, err
}
instance.Data = map[string][]byte{
certSecretKey: crt.Bytes(),
privateKeySecretKey: key.Bytes(),
}
} else {
var c *x509.Certificate
var b *pem.Block
b, _ = pem.Decode(instance.Data[certSecretKey])
c, err = x509.ParseCertificate(b.Bytes)
if err != nil {
r.Log.Error(err, "cannot parse Capsule TLS")
return reconcile.Result{}, err
}

rq = time.Until(c.NotAfter)

err = ca.ValidateCert(c)
if err != nil {
r.Log.Info("Capsule TLS is expired or invalid, cleaning to obtain a new one")
instance.Data = map[string][]byte{}
}
}

var res controllerutil.OperationResult
t := &corev1.Secret{ObjectMeta: instance.ObjectMeta}
res, err = controllerutil.CreateOrUpdate(ctx, r.Client, t, func() error {
t.Data = instance.Data
return nil
})
if err != nil {
r.Log.Error(err, "cannot update Capsule TLS")
return reconcile.Result{}, err
}

if instance.Name == tlsSecretName && res == controllerutil.OperationResultUpdated {
r.Log.Info("Capsule TLS certificates has been updated, Controller pods must be restarted to load new certificate")

hostname, _ := os.Hostname()
leaderPod := &corev1.Pod{}
if err = r.Client.Get(ctx, types.NamespacedName{Namespace: os.Getenv("NAMESPACE"), Name: hostname}, leaderPod); err != nil {
r.Log.Error(err, "cannot retrieve the leader Pod, probably running in out of the cluster mode")

return reconcile.Result{}, nil
}

podList := &corev1.PodList{}
if err = r.Client.List(ctx, podList, client.MatchingLabels(leaderPod.ObjectMeta.Labels)); err != nil {
r.Log.Error(err, "cannot retrieve list of Capsule pods requiring restart upon TLS update")

return reconcile.Result{}, nil
}

for _, p := range podList.Items {
nonLeaderPod := p
// Skipping this Pod, must be deleted at the end
if nonLeaderPod.GetName() == leaderPod.GetName() {
continue
}

if err = r.Client.Delete(ctx, &nonLeaderPod); err != nil {
r.Log.Error(err, "cannot delete the non-leader Pod due to TLS update")
}
}

if err = r.Client.Delete(ctx, leaderPod); err != nil {
r.Log.Error(err, "cannot delete the leader Pod due to TLS update")
}
}

r.Log.Info("Reconciliation completed, processing back in " + rq.String())
return reconcile.Result{Requeue: true, RequeueAfter: rq}, nil
}
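Taken together, the CA and TLS reconcilers above manage two Secrets in the controller namespace, shaped roughly as follows (a sketch with placeholder data; the names and keys come from const.go above, and the namespace is an assumed install namespace):

apiVersion: v1
kind: Secret
metadata:
  name: capsule-ca            # caSecretName: the self-generated certificate authority
  namespace: capsule-system
data:
  tls.crt: <base64 PEM CA certificate>
  tls.key: <base64 PEM CA private key>
---
apiVersion: v1
kind: Secret
metadata:
  name: capsule-tls           # tlsSecretName: serving cert for capsule-webhook-service
  namespace: capsule-system
data:
  tls.crt: <base64 PEM server certificate>
  tls.key: <base64 PEM server private key>
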
@@ -8,15 +8,15 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
@@ -27,39 +27,34 @@ type abstractServiceLabelsReconciler struct {
|
||||
obj client.Object
|
||||
client client.Client
|
||||
log logr.Logger
|
||||
scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func (r *abstractServiceLabelsReconciler) InjectClient(c client.Client) error {
|
||||
r.client = c
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
tenant, err := r.getTenant(ctx, request.NamespacedName, r.client)
|
||||
if err != nil {
|
||||
if errors.As(err, &NonTenantObjectError{}) || errors.As(err, &NoServicesMetadataError{}) {
|
||||
switch err.(type) {
|
||||
case *NonTenantObject, *NoServicesMetadata:
|
||||
return reconcile.Result{}, nil
|
||||
default:
|
||||
r.log.Error(err, fmt.Sprintf("Cannot sync %t labels", r.obj))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
r.log.Error(err, fmt.Sprintf("Cannot sync %T %s/%s labels", r.obj, r.obj.GetNamespace(), r.obj.GetName()))
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
err = r.client.Get(ctx, request.NamespacedName, r.obj)
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(ctx, r.client, r.obj, func() (err error) {
|
||||
r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.Labels))
|
||||
r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.Annotations))
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -102,23 +97,32 @@ func (r *abstractServiceLabelsReconciler) sync(available map[string]string, tena
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return available
|
||||
}
|
||||
|
||||
func (r *abstractServiceLabelsReconciler) forOptionPerInstanceName(ctx context.Context) builder.ForOption {
|
||||
return builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return r.IsNamespaceInTenant(ctx, object.GetNamespace())
|
||||
}))
|
||||
func (r *abstractServiceLabelsReconciler) forOptionPerInstanceName() builder.ForOption {
|
||||
return builder.WithPredicates(predicate.Funcs{
|
||||
CreateFunc: func(event event.CreateEvent) bool {
|
||||
return r.IsNamespaceInTenant(event.Object.GetNamespace())
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
|
||||
return r.IsNamespaceInTenant(deleteEvent.Object.GetNamespace())
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
|
||||
return r.IsNamespaceInTenant(updateEvent.ObjectNew.GetNamespace())
|
||||
},
|
||||
GenericFunc: func(genericEvent event.GenericEvent) bool {
|
||||
return r.IsNamespaceInTenant(genericEvent.Object.GetNamespace())
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (r *abstractServiceLabelsReconciler) IsNamespaceInTenant(ctx context.Context, namespace string) bool {
|
||||
func (r *abstractServiceLabelsReconciler) IsNamespaceInTenant(namespace string) bool {
|
||||
tl := &capsulev1beta1.TenantList{}
|
||||
if err := r.client.List(ctx, tl, client.MatchingFieldsSelector{
|
||||
if err := r.client.List(context.Background(), tl, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".status.namespaces", namespace),
|
||||
}); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return len(tl.Items) > 0
|
||||
}
|
||||
|
@@ -4,8 +4,6 @@
package servicelabels

import (
"context"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -17,13 +15,14 @@ type EndpointsLabelsReconciler struct {
Log logr.Logger
}

func (r *EndpointsLabelsReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
func (r *EndpointsLabelsReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.abstractServiceLabelsReconciler = abstractServiceLabelsReconciler{
obj: &corev1.Endpoints{},
log: r.Log,
obj: &corev1.Endpoints{},
scheme: mgr.GetScheme(),
log: r.Log,
}

return ctrl.NewControllerManagedBy(mgr).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName(ctx)).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
Complete(r)
}
@@ -4,8 +4,6 @@
package servicelabels

import (
"context"

"github.com/go-logr/logr"
discoveryv1 "k8s.io/api/discovery/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
@@ -16,19 +14,20 @@ type EndpointSlicesLabelsReconciler struct {
abstractServiceLabelsReconciler

Log logr.Logger
VersionMinor uint
VersionMajor uint
VersionMinor int
VersionMajor int
}

func (r *EndpointSlicesLabelsReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
func (r *EndpointSlicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.scheme = mgr.GetScheme()
r.abstractServiceLabelsReconciler = abstractServiceLabelsReconciler{
log: r.Log,
scheme: mgr.GetScheme(),
log: r.Log,
}

switch {
case r.VersionMajor == 1 && r.VersionMinor <= 16:
r.Log.Info("Skipping controller setup, as EndpointSlices are not supported on current kubernetes version", "VersionMajor", r.VersionMajor, "VersionMinor", r.VersionMinor)

return nil
case r.VersionMajor == 1 && r.VersionMinor >= 21:
r.abstractServiceLabelsReconciler.obj = &discoveryv1.EndpointSlice{}
@@ -37,6 +36,6 @@ func (r *EndpointSlicesLabelsReconciler) SetupWithManager(ctx context.Context, m
}

return ctrl.NewControllerManagedBy(mgr).
For(r.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName(ctx)).
For(r.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
Complete(r)
}
@@ -5,26 +5,26 @@ package servicelabels

import "fmt"

type NonTenantObjectError struct {
type NonTenantObject struct {
objectName string
}

func NewNonTenantObject(objectName string) error {
return &NonTenantObjectError{objectName: objectName}
return &NonTenantObject{objectName: objectName}
}

func (n NonTenantObjectError) Error() string {
func (n NonTenantObject) Error() string {
return fmt.Sprintf("Skipping labels sync for %s as it doesn't belong to tenant", n.objectName)
}

type NoServicesMetadataError struct {
type NoServicesMetadata struct {
objectName string
}

func NewNoServicesMetadata(objectName string) error {
return &NoServicesMetadataError{objectName: objectName}
return &NoServicesMetadata{objectName: objectName}
}

func (n NoServicesMetadataError) Error() string {
func (n NoServicesMetadata) Error() string {
return fmt.Sprintf("Skipping labels sync for %s because no AdditionalLabels or AdditionalAnnotations presents in Tenant spec", n.objectName)
}
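One side of this file matches these sentinel types with a bare type switch, the other with `errors.As`. A minimal self-contained sketch of the `errors.As` pattern, assuming a value-receiver `Error` method as in the diff; the wrapping error and object name are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
)

// NonTenantObjectError mirrors the renamed error type above.
type NonTenantObjectError struct{ objectName string }

func (n NonTenantObjectError) Error() string {
	return fmt.Sprintf("Skipping labels sync for %s as it doesn't belong to tenant", n.objectName)
}

func main() {
	// A wrapped error, as produced by fmt.Errorf with the %w verb.
	err := fmt.Errorf("reconcile: %w", NonTenantObjectError{objectName: "kube-system/svc"})

	// errors.As walks the wrap chain and fills the target on a match;
	// a plain `switch err.(type)` would miss the wrapped value.
	var target NonTenantObjectError
	if errors.As(err, &target) {
		fmt.Println("non-tenant object:", target.Error())
	}
}
```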
@@ -4,8 +4,6 @@
package servicelabels

import (
"context"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -17,13 +15,13 @@ type ServicesLabelsReconciler struct {
Log logr.Logger
}

func (r *ServicesLabelsReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
func (r *ServicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.abstractServiceLabelsReconciler = abstractServiceLabelsReconciler{
obj: &corev1.Service{},
log: r.Log,
obj: &corev1.Service{},
scheme: mgr.GetScheme(),
log: r.Log,
}

return ctrl.NewControllerManagedBy(mgr).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName(ctx)).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
Complete(r)
}
@@ -13,9 +13,8 @@ import (
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// nolint:dupl
// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
func (r *Manager) syncLimitRanges(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
func (r *Manager) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
// getting requested LimitRange keys
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))

@@ -29,27 +28,26 @@ func (r *Manager) syncLimitRanges(ctx context.Context, tenant *capsulev1beta1.Te
namespace := ns

group.Go(func() error {
return r.syncLimitRange(ctx, tenant, namespace, keys)
return r.syncLimitRange(tenant, namespace, keys)
})
}

return group.Wait()
}

func (r *Manager) syncLimitRange(ctx context.Context, tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
func (r *Manager) syncLimitRange(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
// getting LimitRange labels for the mutateFn
var tenantLabel, limitRangeLabel string

if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
return err
return
}

if limitRangeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.LimitRange{}); err != nil {
return err
return
}

if err = r.pruningResources(ctx, namespace, keys, &corev1.LimitRange{}); err != nil {
return err
if err = r.pruningResources(namespace, keys, &corev1.LimitRange{}); err != nil {
return
}

for i, spec := range tenant.Spec.LimitRanges.Items {
@@ -61,24 +59,22 @@ func (r *Manager) syncLimitRange(ctx context.Context, tenant *capsulev1beta1.Ten
}

var res controllerutil.OperationResult
res, err = controllerutil.CreateOrUpdate(ctx, r.Client, target, func() (err error) {
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
target.ObjectMeta.Labels = map[string]string{
tenantLabel: tenant.Name,
limitRangeLabel: strconv.Itoa(i),
}
target.Spec = spec

return controllerutil.SetControllerReference(tenant, target, r.Client.Scheme())
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
})

r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", target.GetName()), err)

r.Log.Info("LimitRange sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)

if err != nil {
return err
return
}
}

return nil
return
}
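Both sides of the hunk use the same upsert idiom: `CreateOrUpdate` runs the mutate function against the live object and only issues an `Update` when something actually changed, while the controller reference ties garbage collection of the child to its owner. A minimal sketch of that idiom under stated assumptions: the object name and label key are hypothetical, and `c.Scheme()` assumes a controller-runtime version where `client.Client` exposes the scheme (which is what one side of the hunk relies on):

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// upsertLimitRange creates or patches a LimitRange so the mutate function
// is the single source of truth for its labels and spec.
func upsertLimitRange(ctx context.Context, c client.Client, owner client.Object, namespace string, spec corev1.LimitRangeSpec) error {
	target := &corev1.LimitRange{
		// Hypothetical name; the diff derives it from the item index.
		ObjectMeta: metav1.ObjectMeta{Name: "capsule-limit-0", Namespace: namespace},
	}

	_, err := controllerutil.CreateOrUpdate(ctx, c, target, func() error {
		target.Labels = map[string]string{"example.com/managed-by": "sketch"} // hypothetical label
		target.Spec = spec

		// Using c.Scheme() avoids carrying a separate *runtime.Scheme field.
		return controllerutil.SetControllerReference(owner, target, c.Scheme())
	})

	return err
}
```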
@@ -7,8 +7,8 @@ import (
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/rest"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
@@ -20,9 +20,9 @@ import (

type Manager struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
RESTConfig *rest.Config
Log logr.Logger
Scheme *runtime.Scheme
Recorder record.EventRecorder
}

func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {
@@ -38,96 +38,77 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {

func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
r.Log = r.Log.WithValues("Request.Name", request.Name)

// Fetch the Tenant instance
instance := &capsulev1beta1.Tenant{}
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
if apierrors.IsNotFound(err) {
if errors.IsNotFound(err) {
r.Log.Info("Request object not found, could have been deleted after reconcile request")

return reconcile.Result{}, nil
}

r.Log.Error(err, "Error reading the object")

return
}
// Ensuring the Tenant Status
if err = r.updateTenantStatus(ctx, instance); err != nil {
if err = r.updateTenantStatus(instance); err != nil {
r.Log.Error(err, "Cannot update Tenant status")

return
}
// Ensuring ResourceQuota
r.Log.Info("Ensuring limit resources count is updated")

if err = r.syncCustomResourceQuotaUsages(ctx, instance); err != nil {
r.Log.Error(err, "Cannot count limited resources")

return
}
// Ensuring all namespaces are collected
r.Log.Info("Ensuring all Namespaces are collected")

if err = r.collectNamespaces(ctx, instance); err != nil {
if err = r.collectNamespaces(instance); err != nil {
r.Log.Error(err, "Cannot collect Namespace resources")

return
}
// Ensuring Namespace metadata

r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))

if err = r.syncNamespaces(ctx, instance); err != nil {
if err = r.syncNamespaces(instance); err != nil {
r.Log.Error(err, "Cannot sync Namespace items")

return
}
// Ensuring NetworkPolicy resources

r.Log.Info("Starting processing of Network Policies")

if err = r.syncNetworkPolicies(ctx, instance); err != nil {
if err = r.syncNetworkPolicies(instance); err != nil {
r.Log.Error(err, "Cannot sync NetworkPolicy items")

return
}
// Ensuring LimitRange resources

r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))

if err = r.syncLimitRanges(ctx, instance); err != nil {
if err = r.syncLimitRanges(instance); err != nil {
r.Log.Error(err, "Cannot sync LimitRange items")

return
}
// Ensuring ResourceQuota resources

r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))

if err = r.syncResourceQuotas(ctx, instance); err != nil {
if err = r.syncResourceQuotas(instance); err != nil {
r.Log.Error(err, "Cannot sync ResourceQuota items")

return
}
// Ensuring RoleBinding resources
r.Log.Info("Ensuring RoleBindings for Owners and Tenant")

if err = r.syncRoleBindings(ctx, instance); err != nil {
r.Log.Error(err, "Cannot sync RoleBindings items")

r.Log.Info("Ensuring additional RoleBindings for owner")
if err = r.syncAdditionalRoleBindings(instance); err != nil {
r.Log.Error(err, "Cannot sync additional RoleBindings items")
return
}
// Ensuring Namespace count

r.Log.Info("Ensuring RoleBinding for owner")
if err = r.ownerRoleBinding(instance); err != nil {
r.Log.Error(err, "Cannot sync owner RoleBinding")
return
}

r.Log.Info("Ensuring Namespace count")

if err = r.ensureNamespaceCount(ctx, instance); err != nil {
if err = r.ensureNamespaceCount(instance); err != nil {
r.Log.Error(err, "Cannot sync Namespace count")

return
}

r.Log.Info("Tenant reconciling completed")

return ctrl.Result{}, err
}

func (r *Manager) updateTenantStatus(ctx context.Context, tnt *capsulev1beta1.Tenant) error {
func (r *Manager) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
if tnt.IsCordoned() {
tnt.Status.State = capsulev1beta1.TenantStateCordoned
@@ -135,6 +116,6 @@ func (r *Manager) updateTenantStatus(ctx context.Context, tnt *capsulev1beta1.Te
tnt.Status.State = capsulev1beta1.TenantStateActive
}

return r.Client.Status().Update(ctx, tnt)
return r.Client.Status().Update(context.Background(), tnt)
})
}
@@ -20,39 +20,37 @@ import (
)

// Ensuring all annotations are applied to each Namespace handled by the Tenant.
func (r *Manager) syncNamespaces(ctx context.Context, tenant *capsulev1beta1.Tenant) (err error) {
func (r *Manager) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
group := new(errgroup.Group)

for _, item := range tenant.Status.Namespaces {
namespace := item

group.Go(func() error {
return r.syncNamespaceMetadata(ctx, namespace, tenant)
return r.syncNamespaceMetadata(namespace, tenant)
})
}

if err = group.Wait(); err != nil {
r.Log.Error(err, "Cannot sync Namespaces")

err = fmt.Errorf("cannot sync Namespaces: %w", err)
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
}

return
}

// nolint:gocognit
func (r *Manager) syncNamespaceMetadata(ctx context.Context, namespace string, tnt *capsulev1beta1.Tenant) (err error) {
func (r *Manager) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
var res controllerutil.OperationResult

err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
ns := &corev1.Namespace{}
if conflictErr = r.Client.Get(ctx, types.NamespacedName{Name: namespace}, ns); err != nil {
if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
return
}

capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})

res, conflictErr = controllerutil.CreateOrUpdate(ctx, r.Client, ns, func() error {
res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
annotations := make(map[string]string)
labels := map[string]string{
"name": namespace,
@@ -146,28 +144,28 @@ func (r *Manager) syncNamespaceMetadata(ctx context.Context, namespace string, t

r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)

return err
return
}

func (r *Manager) ensureNamespaceCount(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
func (r *Manager) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
tenant.Status.Size = uint(len(tenant.Status.Namespaces))

found := &capsulev1beta1.Tenant{}
if err := r.Client.Get(ctx, types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
return err
}

found.Status.Size = tenant.Status.Size

return r.Client.Status().Update(ctx, found, &client.UpdateOptions{})
return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
})
}

func (r *Manager) collectNamespaces(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
func (r *Manager) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
list := &corev1.NamespaceList{}
err = r.Client.List(ctx, list, client.MatchingFieldsSelector{
err = r.Client.List(context.TODO(), list, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
})

@@ -175,12 +173,11 @@ func (r *Manager) collectNamespaces(ctx context.Context, tenant *capsulev1beta1.
return
}

_, err = controllerutil.CreateOrUpdate(ctx, r.Client, tenant.DeepCopy(), func() error {
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
tenant.AssignNamespaces(list.Items)

return r.Client.Status().Update(ctx, tenant, &client.UpdateOptions{})
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
})

return
})
}
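`syncNamespaces` fans one goroutine out per namespace and joins on the first error, which also explains the `namespace := item` line in the loop body. A minimal runnable sketch of that pattern; `syncOne` is a hypothetical worker standing in for `syncNamespaceMetadata`:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// syncOne is a hypothetical per-namespace worker.
func syncOne(namespace string) error {
	fmt.Println("syncing", namespace)
	return nil
}

func syncAll(namespaces []string) error {
	group := new(errgroup.Group)

	for _, item := range namespaces {
		namespace := item // capture the loop variable per iteration (pre-Go 1.22 idiom)

		group.Go(func() error {
			return syncOne(namespace)
		})
	}

	// Wait blocks until all goroutines finish and returns the first
	// non-nil error any of them produced.
	return group.Wait()
}

func main() {
	_ = syncAll([]string{"oil-dev", "oil-prod"})
}
```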
@@ -13,9 +13,8 @@ import (
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// nolint:dupl
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
func (r *Manager) syncNetworkPolicies(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
func (r *Manager) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
// getting requested NetworkPolicy keys
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))

@@ -29,26 +28,26 @@ func (r *Manager) syncNetworkPolicies(ctx context.Context, tenant *capsulev1beta
namespace := ns

group.Go(func() error {
return r.syncNetworkPolicy(ctx, tenant, namespace, keys)
return r.syncNetworkPolicy(tenant, namespace, keys)
})
}

return group.Wait()
}

func (r *Manager) syncNetworkPolicy(ctx context.Context, tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
if err = r.pruningResources(ctx, namespace, keys, &networkingv1.NetworkPolicy{}); err != nil {
return err
func (r *Manager) syncNetworkPolicy(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
if err = r.pruningResources(namespace, keys, &networkingv1.NetworkPolicy{}); err != nil {
return
}
// getting NetworkPolicy labels for the mutateFn
var tenantLabel, networkPolicyLabel string

if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
return err
return
}

if networkPolicyLabel, err = capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{}); err != nil {
return err
return
}

for i, spec := range tenant.Spec.NetworkPolicies.Items {
@@ -60,14 +59,14 @@ func (r *Manager) syncNetworkPolicy(ctx context.Context, tenant *capsulev1beta1.
}

var res controllerutil.OperationResult
res, err = controllerutil.CreateOrUpdate(ctx, r.Client, target, func() (err error) {
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
target.SetLabels(map[string]string{
tenantLabel: tenant.Name,
networkPolicyLabel: strconv.Itoa(i),
})
target.Spec = spec

return controllerutil.SetControllerReference(tenant, target, r.Client.Scheme())
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
})

r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", target.GetName()), err)
@@ -75,9 +74,9 @@ func (r *Manager) syncNetworkPolicy(ctx context.Context, tenant *capsulev1beta1.
r.Log.Info("Network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)

if err != nil {
return err
return
}
}

return nil
return
}
@@ -31,8 +31,7 @@ import (
// the mutateFn along with the CreateOrUpdate to don't perform the update since resources are identical.
//
// In case of Namespace-scoped Resource Budget, we're just replicating the resources across all registered Namespaces.
// nolint:gocognit
func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta1.Tenant) (err error) {
func (r *Manager) syncResourceQuotas(tenant *capsulev1beta1.Tenant) (err error) {
// getting ResourceQuota labels for the mutateFn
var tenantLabel, typeLabel string

@@ -43,7 +42,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta1
if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
return err
}
// nolint:nestif

if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeTenant {
group := new(errgroup.Group)

@@ -68,9 +67,8 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta1
// These are required since Capsule is going to sum all the used quota to
// sum them and get the Tenant one.
list := &corev1.ResourceQuotaList{}
if scopeErr = r.List(ctx, list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
if scopeErr = r.List(context.TODO(), list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
r.Log.Error(scopeErr, "Cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())

return
}
// Iterating over all the options declared for the ResourceQuota,
@@ -118,13 +116,11 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta1
list.Items[item].Spec.Hard[name] = resourceQuota.Hard[name]
}
}
if scopeErr = r.resourceQuotasUpdate(ctx, name, quantity, resourceQuota.Hard[name], list.Items...); scopeErr != nil {
if scopeErr = r.resourceQuotasUpdate(name, quantity, resourceQuota.Hard[name], list.Items...); scopeErr != nil {
r.Log.Error(scopeErr, "cannot proceed with outer ResourceQuota")

return
}
}

return
})
}
@@ -146,14 +142,14 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta1
namespace := ns

group.Go(func() error {
return r.syncResourceQuota(ctx, tenant, namespace, keys)
return r.syncResourceQuota(tenant, namespace, keys)
})
}

return group.Wait()
}

func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
func (r *Manager) syncResourceQuota(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
// getting ResourceQuota labels for the mutateFn
var tenantLabel, typeLabel string

@@ -165,7 +161,7 @@ func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta1.
return err
}
// Pruning resource of non-requested resources
if err = r.pruningResources(ctx, namespace, keys, &corev1.ResourceQuota{}); err != nil {
if err = r.pruningResources(namespace, keys, &corev1.ResourceQuota{}); err != nil {
return err
}

@@ -178,9 +174,8 @@ func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta1.
}

var res controllerutil.OperationResult

err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
res, retryErr = controllerutil.CreateOrUpdate(ctx, r.Client, target, func() (err error) {
res, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
target.SetLabels(map[string]string{
tenantLabel: tenant.Name,
typeLabel: strconv.Itoa(index),
@@ -192,7 +187,7 @@ func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta1.
target.Spec.Hard = resQuota.Hard
}

return controllerutil.SetControllerReference(tenant, target, r.Client.Scheme())
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
})

return retryErr
@@ -213,7 +208,7 @@ func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta1.
// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
// In case of multiple errors these are logged properly, returning a generic error since we have to repush back the
// reconciliation loop.
func (r *Manager) resourceQuotasUpdate(ctx context.Context, resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) (err error) {
func (r *Manager) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) (err error) {
group := new(errgroup.Group)

for _, item := range list {
@@ -221,12 +216,12 @@ func (r *Manager) resourceQuotasUpdate(ctx context.Context, resourceName corev1.

group.Go(func() (err error) {
found := &corev1.ResourceQuota{}
if err = r.Get(ctx, types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
if err = r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
return
}

return retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
_, retryErr = controllerutil.CreateOrUpdate(ctx, r.Client, found, func() error {
_, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
// Ensuring annotation map is there to avoid uninitialized map error and
// assigning the overall usage
if found.Annotations == nil {
@@ -237,7 +232,6 @@ func (r *Manager) resourceQuotasUpdate(ctx context.Context, resourceName corev1.
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
// Updating the Resource according to the actual.Cmp result
found.Spec.Hard = rq.Spec.Hard

return nil
})

@@ -250,7 +244,7 @@ func (r *Manager) resourceQuotasUpdate(ctx context.Context, resourceName corev1.
// We had an error and we mark the whole transaction as failed
// to process it another time according to the Tenant controller back-off factor.
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
err = fmt.Errorf("update of outer ResourceQuota items has failed: %w", err)
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
}

return err
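The hunks above nest `CreateOrUpdate` inside `retry.RetryOnConflict`, so an optimistic-concurrency 409 from the API server restarts the whole read-modify-write cycle with backoff instead of failing the reconcile. A minimal sketch of that nesting; the object name and annotation are hypothetical:

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// upsertQuota retries the full cycle on each conflict, re-reading the live
// object every attempt so the mutate function works on fresh state.
func upsertQuota(ctx context.Context, c client.Client, namespace string) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		target := &corev1.ResourceQuota{
			ObjectMeta: metav1.ObjectMeta{Name: "capsule-quota-0", Namespace: namespace}, // hypothetical name
		}

		_, err := controllerutil.CreateOrUpdate(ctx, c, target, func() error {
			if target.Annotations == nil {
				target.Annotations = map[string]string{} // avoid writing into a nil map
			}
			target.Annotations["example.com/synced"] = "true" // hypothetical annotation

			return nil
		})

		return err
	})
}
```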
@@ -1,122 +0,0 @@
package tenant

import (
"context"
"fmt"
"strings"

"golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/util/retry"

capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

func (r *Manager) syncCustomResourceQuotaUsages(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
type resource struct {
kind string
group string
version string
}
// nolint:prealloc
var resourceList []resource

for k := range tenant.GetAnnotations() {
if !strings.HasPrefix(k, capsulev1beta1.ResourceQuotaAnnotationPrefix) {
continue
}

parts := strings.Split(k, "/")
if len(parts) != 2 {
r.Log.Info("non well-formed Resource Limit annotation", "key", k)

continue
}

parts = strings.Split(parts[1], "_")

if len(parts) != 2 {
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve version", "key", k)

continue
}

groupKindParts := strings.Split(parts[0], ".")
if len(groupKindParts) < 2 {
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve kind and group", "key", k)

continue
}

resourceList = append(resourceList, resource{
kind: groupKindParts[0],
group: strings.Join(groupKindParts[1:], "."),
version: parts[1],
})
}

errGroup := new(errgroup.Group)

usedMap := make(map[string]int)

defer func() {
for gvk, used := range usedMap {
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
tnt := &capsulev1beta1.Tenant{}
if retryErr = r.Client.Get(ctx, types.NamespacedName{Name: tenant.GetName()}, tnt); retryErr != nil {
return
}

if tnt.GetAnnotations() == nil {
tnt.Annotations = make(map[string]string)
}

tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(gvk)] = fmt.Sprintf("%d", used)

return r.Client.Update(ctx, tnt)
})
if err != nil {
r.Log.Error(err, "cannot update custom Resource Quota", "GVK", gvk)
}
}
}()

for _, item := range resourceList {
res := item

errGroup.Go(func() (scopeErr error) {
dynamicClient := dynamic.NewForConfigOrDie(r.RESTConfig)

for _, ns := range tenant.Status.Namespaces {
var list *unstructured.UnstructuredList

list, scopeErr = dynamicClient.Resource(schema.GroupVersionResource{Group: res.group, Version: res.version, Resource: res.kind}).List(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.namespace==%s", ns),
})
if scopeErr != nil {
return scopeErr
}

key := fmt.Sprintf("%s.%s_%s", res.kind, res.group, res.version)

if _, ok := usedMap[key]; !ok {
usedMap[key] = 0
}

usedMap[key] += len(list.Items)
}

return
})
}

if err := errGroup.Wait(); err != nil {
return err
}

return nil
}
@@ -9,43 +9,16 @@ import (
"golang.org/x/sync/errgroup"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
"github.com/clastix/capsule/controllers/rbac"
)

// ownerClusterRoleBindings generates a Capsule AdditionalRoleBinding object for the Owner dynamic clusterrole in order
// to take advantage of the additional role binding feature.
func (r *Manager) ownerClusterRoleBindings(owner capsulev1beta1.OwnerSpec, clusterRole string) capsulev1beta1.AdditionalRoleBindingsSpec {
var subject rbacv1.Subject

if owner.Kind == "ServiceAccount" {
splitName := strings.Split(owner.Name, ":")

subject = rbacv1.Subject{
Kind: owner.Kind.String(),
Name: splitName[len(splitName)-1],
Namespace: splitName[len(splitName)-2],
}
} else {
subject = rbacv1.Subject{
APIGroup: rbacv1.GroupName,
Kind: owner.Kind.String(),
Name: owner.Name,
}
}

return capsulev1beta1.AdditionalRoleBindingsSpec{
ClusterRoleName: clusterRole,
Subjects: []rbacv1.Subject{
subject,
},
}
}

// Sync the dynamic Tenant Owner specific cluster-roles and additional Role Bindings, which can be used in many ways:
// applying Pod Security Policies or giving access to CRDs or specific API groups.
func (r *Manager) syncRoleBindings(ctx context.Context, tenant *capsulev1beta1.Tenant) (err error) {
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
// access to CRDs or specific API groups.
func (r *Manager) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
hashFn := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
h := fnv.New64a()
@@ -59,16 +32,7 @@ func (r *Manager) syncRoleBindings(ctx context.Context, tenant *capsulev1beta1.T
return fmt.Sprintf("%x", h.Sum64())
}
// getting requested Role Binding keys
keys := make([]string, 0, len(tenant.Spec.Owners))
// Generating for dynamic tenant owners cluster roles
for index, owner := range tenant.Spec.Owners {
for _, clusterRoleName := range owner.GetRoles(*tenant, index) {
cr := r.ownerClusterRoleBindings(owner, clusterRoleName)

keys = append(keys, hashFn(cr))
}
}
// Generating hash of additional role bindings
var keys []string
for _, i := range tenant.Spec.AdditionalRoleBindings {
keys = append(keys, hashFn(i))
}
@@ -79,14 +43,14 @@ func (r *Manager) syncRoleBindings(ctx context.Context, tenant *capsulev1beta1.T
namespace := ns

group.Go(func() error {
return r.syncAdditionalRoleBinding(ctx, tenant, namespace, keys, hashFn)
return r.syncAdditionalRoleBinding(tenant, namespace, keys, hashFn)
})
}

return group.Wait()
}

func (r *Manager) syncAdditionalRoleBinding(ctx context.Context, tenant *capsulev1beta1.Tenant, ns string, keys []string, hashFn func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string) (err error) {
func (r *Manager) syncAdditionalRoleBinding(tenant *capsulev1beta1.Tenant, ns string, keys []string, hashFn func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string) (err error) {
var tenantLabel, roleBindingLabel string

if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
@@ -97,21 +61,11 @@ func (r *Manager) syncAdditionalRoleBinding(ctx context.Context, tenant *capsule
return
}

if err = r.pruningResources(ctx, ns, keys, &rbacv1.RoleBinding{}); err != nil {
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
return
}

var roleBindings []capsulev1beta1.AdditionalRoleBindingsSpec

for index, owner := range tenant.Spec.Owners {
for _, clusterRoleName := range owner.GetRoles(*tenant, index) {
roleBindings = append(roleBindings, r.ownerClusterRoleBindings(owner, clusterRoleName))
}
}

roleBindings = append(roleBindings, tenant.Spec.AdditionalRoleBindings...)

for i, roleBinding := range roleBindings {
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
roleBindingHashLabel := hashFn(roleBinding)

target := &rbacv1.RoleBinding{
@@ -122,29 +76,27 @@ func (r *Manager) syncAdditionalRoleBinding(ctx context.Context, tenant *capsule
}

var res controllerutil.OperationResult
res, err = controllerutil.CreateOrUpdate(ctx, r.Client, target, func() error {
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() error {
target.ObjectMeta.Labels = map[string]string{
tenantLabel: tenant.Name,
roleBindingLabel: roleBindingHashLabel,
}
target.RoleRef = rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: roleBinding.ClusterRoleName,
}
target.Subjects = roleBinding.Subjects

return controllerutil.SetControllerReference(tenant, target, r.Client.Scheme())
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
})

r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring RoleBinding %s", target.GetName()), err)
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", target.GetName()), err)

if err != nil {
r.Log.Error(err, "Cannot sync RoleBinding")
r.Log.Error(err, "Cannot sync Additional RoleBinding")
}

r.Log.Info(fmt.Sprintf("RoleBinding sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)

r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
if err != nil {
return
}
@@ -152,3 +104,76 @@ func (r *Manager) syncAdditionalRoleBinding(ctx context.Context, tenant *capsule

return nil
}

// Each Tenant owner needs the admin Role attached to each Namespace, otherwise no actions on it can be performed.
// Since RBAC is based on deny all first, some specific actions like editing Capsule resources are going to be blocked
// via Dynamic Admission Webhooks.
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
func (r *Manager) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
// getting RoleBinding label for the mutateFn
var subjects []rbacv1.Subject

tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
if err != nil {
return err
}

newLabels := map[string]string{tl: tenant.Name}

for _, owner := range tenant.Spec.Owners {
if owner.Kind == rbacv1.ServiceAccountKind {
splitName := strings.Split(owner.Name, ":")
subjects = append(subjects, rbacv1.Subject{
Kind: owner.Kind.String(),
Name: splitName[len(splitName)-1],
Namespace: splitName[len(splitName)-2],
})
} else {
subjects = append(subjects, rbacv1.Subject{
APIGroup: "rbac.authorization.k8s.io",
Kind: owner.Kind.String(),
Name: owner.Name,
})
}
}

list := make(map[types.NamespacedName]rbacv1.RoleRef)

for _, i := range tenant.Status.Namespaces {
list[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "admin",
}
list[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: rbac.DeleterRoleName,
}
}

for namespacedName, roleRef := range list {
target := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
},
}

var res controllerutil.OperationResult
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
target.ObjectMeta.Labels = newLabels
target.Subjects = subjects
target.RoleRef = roleRef
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
})

r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)

r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
if err != nil {
return err
}
}
return nil
}
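The `hashFn` above flattens a binding into an FNV-1a digest so the resulting label value always fits the RFC 1123 constraints Kubernetes applies to labels. A small runnable sketch of the same trick on plain strings; the inputs in `main` are hypothetical:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// labelSafeHash condenses arbitrary input (role names, subjects, ...) into a
// short hex string that is always a valid Kubernetes label value, sidestepping
// the 63-character length and character-set limits.
func labelSafeHash(parts ...string) string {
	h := fnv.New64a()

	for _, p := range parts {
		_, _ = h.Write([]byte(p)) // fnv's Write never returns an error
	}

	return fmt.Sprintf("%x", h.Sum64())
}

func main() {
	// Hypothetical binding: a cluster role plus its subjects.
	fmt.Println(labelSafeHash("admin", "alice", "bob")) // deterministic hex digest
}
```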
@@ -16,9 +16,8 @@ import (

// pruningResources is taking care of removing the no more requested sub-resources as LimitRange, ResourceQuota or
// NetworkPolicy using the "exists" and "notin" LabelSelector to perform an outer-join removal.
func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string, obj client.Object) (err error) {
func (r *Manager) pruningResources(ns string, keys []string, obj client.Object) (err error) {
var capsuleLabel string

if capsuleLabel, err = capsulev1beta1.GetTypeLabel(obj); err != nil {
return
}
@@ -26,16 +25,13 @@ func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string
selector := labels.NewSelector()

var exists *labels.Requirement

if exists, err = labels.NewRequirement(capsuleLabel, selection.Exists, []string{}); err != nil {
return
}

selector = selector.Add(*exists)

if len(keys) > 0 {
var notIn *labels.Requirement

if notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys); err != nil {
return err
}
@@ -46,7 +42,7 @@ func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string
r.Log.Info("Pruning objects with label selector " + selector.String())

return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return r.DeleteAllOf(ctx, obj, &client.DeleteAllOfOptions{
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
LabelSelector: selector,
Namespace: ns,
@@ -57,8 +53,7 @@ func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string
}

func (r *Manager) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
eventType := corev1.EventTypeNormal

var eventType = corev1.EventTypeNormal
if err != nil {
eventType = corev1.EventTypeWarning
res = "Error"
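The "exists AND notin" selector described in the comment matches every object that carries the managed-by label but whose value is no longer in the requested key set, which is exactly the outer-join removal. A minimal runnable sketch of building that selector; the label key and key set in `main` are hypothetical:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

// buildPruneSelector combines an Exists requirement with an optional NotIn
// requirement on the same label key.
func buildPruneSelector(label string, keys []string) (labels.Selector, error) {
	selector := labels.NewSelector()

	exists, err := labels.NewRequirement(label, selection.Exists, nil)
	if err != nil {
		return nil, err
	}

	selector = selector.Add(*exists)

	if len(keys) > 0 {
		notIn, err := labels.NewRequirement(label, selection.NotIn, keys)
		if err != nil {
			return nil, err
		}

		selector = selector.Add(*notIn)
	}

	return selector, nil
}

func main() {
	s, _ := buildPruneSelector("capsule.clastix.io/limit-range", []string{"0", "1"})
	fmt.Println(s.String()) // prints the combined "exists" + "notin (0,1)" selector
}
```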
@@ -1,10 +0,0 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package tls

type RunningInOutOfClusterModeError struct{}

func (r RunningInOutOfClusterModeError) Error() string {
return "cannot retrieve the leader Pod, probably running in out of the cluster mode"
}
@@ -1,337 +0,0 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package tls

import (
"context"
"fmt"
"os"
"time"

"github.com/go-logr/logr"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"

"github.com/clastix/capsule/controllers/utils"
"github.com/clastix/capsule/pkg/cert"
"github.com/clastix/capsule/pkg/configuration"
)

const (
certificateExpirationThreshold = 3 * 24 * time.Hour
certificateValidity = 6 * 30 * 24 * time.Hour
PodUpdateAnnotationName = "capsule.clastix.io/updated"
)

type Reconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Namespace string
Configuration configuration.Configuration
}

func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
enqueueFn := handler.EnqueueRequestsFromMapFunc(func(client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Namespace: r.Namespace,
Name: r.Configuration.TLSSecretName(),
},
},
}
})

return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, utils.NamesMatchingPredicate(r.Configuration.TLSSecretName())).
Watches(source.NewKindWithCache(&admissionregistrationv1.ValidatingWebhookConfiguration{}, mgr.GetCache()), enqueueFn, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == r.Configuration.ValidatingWebhookConfigurationName()
}))).
Watches(source.NewKindWithCache(&admissionregistrationv1.MutatingWebhookConfiguration{}, mgr.GetCache()), enqueueFn, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == r.Configuration.MutatingWebhookConfigurationName()
}))).
Watches(source.NewKindWithCache(&apiextensionsv1.CustomResourceDefinition{}, mgr.GetCache()), enqueueFn, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == r.Configuration.TenantCRDName()
}))).
Complete(r)
}

func (r Reconciler) ReconcileCertificates(ctx context.Context, certSecret *corev1.Secret) error {
if r.shouldUpdateCertificate(certSecret) {
r.Log.Info("Generating new TLS certificate")

ca, err := cert.GenerateCertificateAuthority()
if err != nil {
return err
}

opts := cert.NewCertOpts(time.Now().Add(certificateValidity), fmt.Sprintf("capsule-webhook-service.%s.svc", r.Namespace))

crt, key, err := ca.GenerateCertificate(opts)
if err != nil {
r.Log.Error(err, "Cannot generate new TLS certificate")

return err
}

caCrt, _ := ca.CACertificatePem()

certSecret.Data = map[string][]byte{
corev1.TLSCertKey: crt.Bytes(),
corev1.TLSPrivateKeyKey: key.Bytes(),
corev1.ServiceAccountRootCAKey: caCrt.Bytes(),
}

t := &corev1.Secret{ObjectMeta: certSecret.ObjectMeta}

_, err = controllerutil.CreateOrUpdate(ctx, r.Client, t, func() error {
t.Data = certSecret.Data

return nil
})
if err != nil {
r.Log.Error(err, "cannot update Capsule TLS")

return err
}
}

var caBundle []byte

var ok bool

if caBundle, ok = certSecret.Data[corev1.ServiceAccountRootCAKey]; !ok {
return fmt.Errorf("missing %s field in %s secret", corev1.ServiceAccountRootCAKey, r.Configuration.TLSSecretName())
}

r.Log.Info("Updating caBundle in webhooks and crd")

group := new(errgroup.Group)
group.Go(func() error {
return r.updateMutatingWebhookConfiguration(ctx, caBundle)
})
group.Go(func() error {
return r.updateValidatingWebhookConfiguration(ctx, caBundle)
})
group.Go(func() error {
return r.updateCustomResourceDefinition(ctx, caBundle)
})

operatorPods, err := r.getOperatorPods(ctx)
if err != nil {
if errors.As(err, &RunningInOutOfClusterModeError{}) {
r.Log.Info("skipping annotation of Pods for cert-manager", "error", err.Error())

return nil
}

return err
}

r.Log.Info("Updating capsule operator pods")

for _, pod := range operatorPods.Items {
p := pod

group.Go(func() error {
return r.updateOperatorPod(ctx, p)
})
}

if err := group.Wait(); err != nil {
return err
}

return nil
}

func (r Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)

certSecret := &corev1.Secret{}

if err := r.Client.Get(ctx, request.NamespacedName, certSecret); err != nil {
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}

if err := r.ReconcileCertificates(ctx, certSecret); err != nil {
return reconcile.Result{}, err
}

certificate, err := cert.GetCertificateFromBytes(certSecret.Data[corev1.TLSCertKey])
if err != nil {
return reconcile.Result{}, err
}

now := time.Now()
requeueTime := certificate.NotAfter.Add(-(certificateExpirationThreshold - 1*time.Second))
rq := requeueTime.Sub(now)

r.Log.Info("Reconciliation completed, processing back in " + rq.String())

return reconcile.Result{Requeue: true, RequeueAfter: rq}, nil
}

func (r Reconciler) shouldUpdateCertificate(secret *corev1.Secret) bool {
if _, ok := secret.Data[corev1.ServiceAccountRootCAKey]; !ok {
return true
}

certificate, key, err := cert.GetCertificateWithPrivateKeyFromBytes(secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey])
if err != nil {
return true
}

if err := cert.ValidateCertificate(certificate, key, certificateExpirationThreshold); err != nil {
r.Log.Error(err, "failed to validate certificate, generating new one")

return true
}

r.Log.Info("Skipping TLS certificate generation as it is still valid")

return false
}

// By default helm doesn't allow to use templates in CRD (https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#method-1-let-helm-do-it-for-you).
// In order to overcome this, we are setting conversion strategy in helm chart to None, and then update it with CA and namespace information.
func (r *Reconciler) updateCustomResourceDefinition(ctx context.Context, caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
crd := &apiextensionsv1.CustomResourceDefinition{}
err = r.Get(ctx, types.NamespacedName{Name: "tenants.capsule.clastix.io"}, crd)
if err != nil {
r.Log.Error(err, "cannot retrieve CustomResourceDefinition")

return err
}

_, err = controllerutil.CreateOrUpdate(ctx, r.Client, crd, func() error {
crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
Strategy: "Webhook",
Webhook: &apiextensionsv1.WebhookConversion{
ClientConfig: &apiextensionsv1.WebhookClientConfig{
Service: &apiextensionsv1.ServiceReference{
Namespace: r.Namespace,
Name: "capsule-webhook-service",
Path: pointer.StringPtr("/convert"),
Port: pointer.Int32Ptr(443),
},
CABundle: caBundle,
},
ConversionReviewVersions: []string{"v1alpha1", "v1beta1"},
},
}

return nil
})

return err
})
}

//nolint:dupl
func (r Reconciler) updateValidatingWebhookConfiguration(ctx context.Context, caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
vw := &admissionregistrationv1.ValidatingWebhookConfiguration{}
err = r.Get(ctx, types.NamespacedName{Name: r.Configuration.ValidatingWebhookConfigurationName()}, vw)
if err != nil {
r.Log.Error(err, "cannot retrieve ValidatingWebhookConfiguration")

return err
}
for i, w := range vw.Webhooks {
// Updating CABundle only in case of an internal service reference
if w.ClientConfig.Service != nil {
vw.Webhooks[i].ClientConfig.CABundle = caBundle
}
}

return r.Update(ctx, vw, &client.UpdateOptions{})
})
}

//nolint:dupl
func (r Reconciler) updateMutatingWebhookConfiguration(ctx context.Context, caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
mw := &admissionregistrationv1.MutatingWebhookConfiguration{}
err = r.Get(ctx, types.NamespacedName{Name: r.Configuration.MutatingWebhookConfigurationName()}, mw)
if err != nil {
r.Log.Error(err, "cannot retrieve MutatingWebhookConfiguration")

return err
}
for i, w := range mw.Webhooks {
// Updating CABundle only in case of an internal service reference
if w.ClientConfig.Service != nil {
mw.Webhooks[i].ClientConfig.CABundle = caBundle
}
}

return r.Update(ctx, mw, &client.UpdateOptions{})
})
}

func (r Reconciler) updateOperatorPod(ctx context.Context, pod corev1.Pod) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Need to get latest version of pod
p := &corev1.Pod{}

if err := r.Client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, p); err != nil && !apierrors.IsNotFound(err) {
r.Log.Error(err, "cannot get pod", "name", pod.Name, "namespace", pod.Namespace)

return err
}

if p.Annotations == nil {
p.Annotations = map[string]string{}
}

p.Annotations[PodUpdateAnnotationName] = time.Now().Format(time.RFC3339Nano)

if err := r.Client.Update(ctx, p, &client.UpdateOptions{}); err != nil {
r.Log.Error(err, "cannot update pod", "name", pod.Name, "namespace", pod.Namespace)

return err
}

return nil
})
}

func (r Reconciler) getOperatorPods(ctx context.Context) (*corev1.PodList, error) {
hostname, _ := os.Hostname()

leaderPod := &corev1.Pod{}

if err := r.Client.Get(ctx, types.NamespacedName{Namespace: os.Getenv("NAMESPACE"), Name: hostname}, leaderPod); err != nil {
return nil, RunningInOutOfClusterModeError{}
}

podList := &corev1.PodList{}
if err := r.Client.List(ctx, podList, client.MatchingLabels(leaderPod.ObjectMeta.Labels)); err != nil {
r.Log.Error(err, "cannot retrieve list of Capsule pods")

return nil, err
}

return podList, nil
}
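The deleted reconciler above schedules its next run one second inside the expiration threshold, so the renewal branch is guaranteed to trigger on the following pass. A tiny runnable sketch of that requeue arithmetic; the certificate expiry in `main` is hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

const expirationThreshold = 3 * 24 * time.Hour

// nextRequeue returns how long to wait before reconciling again: the
// certificate's NotAfter, pulled back by the threshold minus one second.
func nextRequeue(notAfter, now time.Time) time.Duration {
	requeueAt := notAfter.Add(-(expirationThreshold - 1*time.Second))

	return requeueAt.Sub(now)
}

func main() {
	// Hypothetical certificate expiring in 30 days: requeue in roughly 27 days.
	notAfter := time.Now().Add(30 * 24 * time.Hour)
	fmt.Println(nextRequeue(notAfter, time.Now()))
}
```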
@@ -1,19 +0,0 @@
package utils

import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func NamesMatchingPredicate(names ...string) builder.Predicates {
return builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
for _, name := range names {
if object.GetName() == name {
return true
}
}

return false
}))
}
8
docs/.gitignore
vendored
@@ -1,8 +0,0 @@
*.log
.cache
.DS_Store
src/.temp
node_modules
dist
.env
.env.*
@@ -1,12 +0,0 @@
# Capsule Documentation

1. Ensure to have [`yarn`](https://classic.yarnpkg.com/lang/en/docs/install/#debian-stable) installed in your path.
2. `yarn install`

## Local development

```shell
yarn develop
```

This will create a local webserver listening on `localhost:8080` with hot-reload of your local changes.
[Five documentation image assets changed paths; dimensions and sizes unchanged: 4.5 KiB, 111 KiB, 28 KiB, 30 KiB, 63 KiB]