chore(yamllint): fix yaml comments
Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>
.github/ISSUE_TEMPLATE/bug_report.md
@@ -9,7 +9,7 @@ assignees: ''

<!--
Thanks for taking time reporting a Capsule bug!
-->

# Bug description

.github/ISSUE_TEMPLATE/feature_request.md
@@ -32,4 +32,4 @@ How would the new interaction with Capsule look like? E.g.

Feel free to add a diagram if that helps explain things.

# Expected behavior
A clear and concise description of what you expect to happen.
A clear and concise description of what you expect to happen.

.github/workflows/check-pr.yml
@@ -15,7 +15,7 @@ jobs:

name: Validate PR title
runs-on: ubuntu-latest
steps:
- uses: amannn/action-semantic-pull-request@04501d43b574e4c1d23c629ffe4dcec27acfdeff
- uses: amannn/action-semantic-pull-request@335288255954904a41ddda8947c8f2c844b8bfeb
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

.github/workflows/coverage.yml
@@ -34,7 +34,7 @@ jobs:

with:
args: '-no-fail -fmt sarif -out gosec.sarif ./...'
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@d26c46acea4065b13fc57703621e0a7c8b9e836b
uses: github/codeql-action/upload-sarif@ed51cb5abd90d0e898e492d5e3f24423da71c2fb
with:
sarif_file: gosec.sarif
unit_tests:

@@ -55,7 +55,7 @@ jobs:

value: ${{ secrets.CODECOV_TOKEN }}
- name: Upload Report to Codecov
if: ${{ steps.checksecret.outputs.result == 'true' }}
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
slug: projectcapsule/capsule

.github/workflows/docker-build.yml
@@ -40,6 +40,6 @@ jobs:

# See: https://github.com/aquasecurity/trivy-action/issues/389#issuecomment-2385416577
TRIVY_DB_REPOSITORY: 'public.ecr.aws/aquasecurity/trivy-db:2'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@d26c46acea4065b13fc57703621e0a7c8b9e836b
uses: github/codeql-action/upload-sarif@ed51cb5abd90d0e898e492d5e3f24423da71c2fb
with:
sarif_file: 'trivy-results.sarif'

.github/workflows/docker-publish.yml
@@ -36,7 +36,7 @@ jobs:

output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: Install Cosign
uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: Publish Capsule
id: publish-capsule
uses: peak-scale/github-actions/make-ko-publish@a441cca016861c546ab7e065277e40ce41a3eb84 # v0.2.0

.github/workflows/helm-publish.yml
@@ -46,7 +46,7 @@ jobs:

chart-digest: ${{ steps.helm_publish.outputs.digest }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
- uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: "Extract Version"
id: extract_version
run: |

.github/workflows/releaser.yml
@@ -11,7 +11,7 @@ concurrency:

cancel-in-progress: true

jobs:
#seccomp-generation:
# seccomp-generation:
# name: Seccomp Generation
# strategy:
# fail-fast: false

@@ -43,9 +43,8 @@ jobs:

# with:
# name: capsule-seccomp
# path: capsule-seccomp.json

create-release:
#needs: seccomp-generation
# needs: seccomp-generation
runs-on: ubuntu-latest
permissions:
contents: write

@@ -64,10 +63,10 @@ jobs:

timeout-minutes: 5
continue-on-error: true
- uses: creekorful/goreportcard-action@1f35ced8cdac2cba28c9a2f2288a16aacfd507f9 # v1.0
- uses: anchore/sbom-action/download-syft@5aeee89178a395035617e72a70928596d7ad2a85
- uses: anchore/sbom-action/download-syft@9f7302141466aa6482940f15371237e9d9f4c34a
- name: Install Cosign
uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
#- name: download artifact
# - name: download artifact
# uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
# with:
# name: capsule-seccomp
.github/workflows/scorecard.yml
@@ -37,6 +37,6 @@ jobs:

path: results.sarif
retention-days: 5
- name: Upload to code-scanning
uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15
uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
with:
sarif_file: results.sarif

@@ -56,7 +56,7 @@ release:

Thanks to all the contributors! 🚀 🦄
#extra_files:
# extra_files:
# - glob: ./capsule-seccomp.json
checksum:
name_template: 'checksums.txt'

@@ -39,4 +39,4 @@ repos:

name: Execute golangci-lint
entry: make golint
language: system
files: \.go$
files: \.go$

@@ -7,4 +7,4 @@ See the [Releases](https://github.com/projectcapsule/capsule/releases)

## Helm Chart

For the helm chart, a dedicated changelog is created based on the chart's annotations ([See](./DEVELOPMENT.md#helm-changelog)).
For the helm chart, a dedicated changelog is created based on the chart's annotations ([See](./DEVELOPMENT.md#helm-changelog)).

@@ -45,7 +45,7 @@ Prereleases are marked as `-rc.x` (release candidate) and may refere to any type

The pull request title is checked according to the described [semantics](#semantics) (pull requests don't require a scope). However pull requests are currently not used to generate the changelog. Check if your pull requests body meets the following criteria:

- reference a previously opened issue: https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls#issues-and-pull-requests
- reference a previously opened issue: https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls#issues-and-pull-requests
- splitting changes into several and documented small commits
- limit the git subject to 50 characters and write as the continuation of the
sentence "If applied, this commit will ..."

@@ -104,7 +104,7 @@ To reorganise your commits, do the following (or use your way of doing it):

1. Pull upstream changes

```bash
git remote add upstream git@github.com:projectcapsule/capsule.git
git pull upstream main

@@ -186,4 +186,3 @@ The following types are allowed for commits and pull requests:

* `fix`: bug fixes
* `test`: test related changes
* `sec`: security related changes

@@ -23,10 +23,10 @@ Capsule maintainers must follow these guidelines when consuming third-party packa

When adding a new third-party package to Capsule, maintainers must follow these steps:

1. Evaluate the need for the package. Is it necessary for the functionality of Capsule?
2. Research the package. Is it well-maintained? Does it have a good reputation?
3. Choose a version of the package. Use the latest version whenever possible.
4. Pin the package to the specific version in the Capsule codebase.
1. Evaluate the need for the package. Is it necessary for the functionality of Capsule?
2. Research the package. Is it well-maintained? Does it have a good reputation?
3. Choose a version of the package. Use the latest version whenever possible.
4. Pin the package to the specific version in the Capsule codebase.
5. Update the Capsule documentation to reflect the new dependency.

## Archive/Deprecation

@@ -60,7 +60,7 @@ To achieve that, there are some necessary steps we need to walk through, which h

So the TL;DR answer is:

**Make sure a *KinD* cluster is running on your laptop, and then run `make dev-setup` to setup the dev environment.**. This is not done in the `make dev-setup` setup.
**Make sure a *KinD* cluster is running on your laptop, and then run `make dev-setup` to setup the dev environment.**. This is not done in the `make dev-setup` setup.

```bash
# If you haven't installed or run `make deploy` before, do it first

@@ -222,12 +222,12 @@ time="2023-10-23T13:45:08Z" level=info msg="Found Chart directories [charts/caps

time="2023-10-23T13:45:08Z" level=info msg="Generating README Documentation for chart /helm-docs/charts/capsule"
```

This will update the documentation for the chart in the `README.md` file.
This will update the documentation for the chart in the `README.md` file.

### Helm Changelog
### Helm Changelog

The `version` of the chart does not require a bump, since it's driven by our release process. The `appVersion` of the chart is the version of the Capsule project. This is the version that should be bumped when a new Capsule version is released. This will be done by the maintainers.

To create the proper changelog for the helm chart, all changes which affect the helm chart must be documented as chart annotation. See all the available [chart annotations](https://artifacthub.io/docs/topics/annotations/helm/).

This annotation can be provided using two different formats: using a plain list of strings with the description of the change or using a list of objects with some extra structured information (see example below). Please feel free to use the one that better suits your needs. The UI experience will be slightly different depending on the choice. When using the list of objects option the valid supported kinds are `added`, `changed`, `deprecated`, `removed`, `fixed` and `security`.
This annotation can be provided using two different formats: using a plain list of strings with the description of the change or using a list of objects with some extra structured information (see example below). Please feel free to use the one that better suits your needs. The UI experience will be slightly different depending on the choice. When using the list of objects option the valid supported kinds are `added`, `changed`, `deprecated`, `removed`, `fixed` and `security`.
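For illustration, a minimal sketch of what such an entry could look like in the chart's `Chart.yaml`, using the list-of-objects format (the change descriptions below are placeholders, not taken from this diff):

```yaml
annotations:
  artifacthub.io/changes: |
    - kind: added
      description: "Placeholder: new value to tune the webhook timeout"
    - kind: fixed
      description: "Placeholder: corrected namespace selector rendering"
```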
@@ -6,7 +6,7 @@ FROM ${TARGET_IMAGE} AS target

FROM ghcr.io/alegrey91/harpoon:latest
WORKDIR /
COPY --from=target /ko-app/capsule ./manager

RUN chmod +x ./harpoon
ENTRYPOINT ["/harpoon", \
"capture", \
"-f", "main.main", \

@@ -77,7 +77,7 @@ Maintainers who are selected will be granted the necessary GitHub rights.

Maintainers may resign at any time if they feel that they will not be able to
continue fulfilling their project duties.

Maintainers may also be removed after being inactive, failure to fulfill their
Maintainers may also be removed after being inactive, failure to fulfill their
Maintainer responsibilities, violating the Code of Conduct, or other reasons.
A Maintainer may be removed at any time by a 2/3 vote of the remaining maintainers.

@@ -88,7 +88,7 @@ and can be rapidly returned to Maintainer status if their availability changes.

## Meetings

Time zones permitting, Maintainers are expected to participate in the public
developer meeting and/or public discussions.
developer meeting and/or public discussions.

Maintainers will also have closed meetings in order to discuss security reports
or Code of Conduct violations. Such meetings should be scheduled by any

@@ -110,7 +110,7 @@ violations by community members will be discussed and resolved in private Mainta

The Maintainers will appoint a Security Response Team to handle security reports.
This committee may simply consist of the Maintainer Council themselves. If this
responsibility is delegated, the Maintainers will appoint a team of at least two
responsibility is delegated, the Maintainers will appoint a team of at least two
contributors to handle it. The Maintainers will review who is assigned to this
at least once a year.

@@ -119,15 +119,15 @@ holes and breaches according to the [security policy](TODO:Link to security.md).

## Voting

While most business in Capsule Project is conducted by "[lazy consensus](https://community.apache.org/committers/lazyConsensus.html)",
While most business in Capsule Project is conducted by "[lazy consensus](https://community.apache.org/committers/lazyConsensus.html)",
periodically the Maintainers may need to vote on specific actions or changes.
Any Maintainer may demand a vote be taken.

Most votes require a simple majority of all Maintainers to succeed, except where
otherwise noted. Two-thirds majority votes mean at least two-thirds of all
otherwise noted. Two-thirds majority votes mean at least two-thirds of all
existing maintainers.

## Modifying this Charter

Changes to this Governance and its supporting documents may be approved by
a 2/3 vote of the Maintainers.
Changes to this Governance and its supporting documents may be approved by
a 2/3 vote of the Maintainers.

@@ -10,4 +10,4 @@ The current Maintainers Group for the [TODO: Projectname] Project consists of:

This list must be kept in sync with the [CNCF Project Maintainers list](https://github.com/cncf/foundation/blob/master/project-maintainers.csv).

See [the project Governance](GOVERNANCE.md) for how maintainers are selected and replaced.
See [the project Governance](GOVERNANCE.md) for how maintainers are selected and replaced.

Makefile
@@ -325,7 +325,7 @@ helm-doc:

# -- Tools
####################
CONTROLLER_GEN := $(LOCALBIN)/controller-gen
CONTROLLER_GEN_VERSION ?= v0.17.2
CONTROLLER_GEN_VERSION ?= v0.17.3
CONTROLLER_GEN_LOOKUP := kubernetes-sigs/controller-tools
controller-gen:
@test -s $(CONTROLLER_GEN) && $(CONTROLLER_GEN) --version | grep -q $(CONTROLLER_GEN_VERSION) || \

@@ -1,3 +1,3 @@
# Roadmap

future features and fixes are planned with [release milestones on GitHub](https://github.com/projectcapsule/capsule/milestones?direction=asc&sort=due_date&state=open). You can influence the roadmap by opening issues or joining our community meetings.
future features and fixes are planned with [release milestones on GitHub](https://github.com/projectcapsule/capsule/milestones?direction=asc&sort=due_date&state=open). You can influence the roadmap by opening issues or joining our community meetings.

@@ -81,7 +81,7 @@ Capsule was accepted as a CNCF sandbox project in December 2022.

It's the Operator which provides all the multi-tenant capabilities offered by Capsule.
It's made of two internal components, such as the webhooks server (known as _policy engine_), and the _tenant controller_.

**Capsule Tenant Controller**
**Capsule Tenant Controller**

The controller is responsible for managing the tenants by reconciling the required objects at the Namespace level, such as _Network Policy_, _LimitRange_, _ResourceQuota_, _Role Binding_, as well as labelling the Namespace objects belonging to a Tenant according to their desired metadata.
It is responsible for binding Namespaces to the selected Tenant, and managing their lifecycle.

@@ -90,10 +90,10 @@ Furthermore, the manager can replicate objects thanks to the **Tenant Resource**

The replicated resources are dynamically created, and replicated by Capsule itself, as well as preserving the deletion of these objects by the Tenant owner.

**Capsule Tenant Controller (Policy Engine)**
**Capsule Tenant Controller (Policy Engine)**

Policies are defined on a Tenant basis: therefore the policy engine is enforcing these policies on the tenants's Namespaces and their children's resources.
The Policy Engine is currently not a dedicated component, but a part of the Capsule Tenant Controller.
The Policy Engine is currently not a dedicated component, but a part of the Capsule Tenant Controller.

The webhook server, also known as the policy engine, interpolates the Tenant rules and takes full advantage of the dynamic admission controllers offered by Kubernetes itself (such as `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration`).
Thanks to the _policy engine_ the cluster administrators can enforce specific rules such as preventing _Pod_ objects from untrusted registries to run or preventing the creation of _PersistentVolumeClaim_ resources using a non-allowed _StorageClass_, etc.

@@ -152,7 +152,7 @@ This is a further abstraction from having cluster defaults (eg. default `Storage

**General**

* **Control Plane**: Capsule can't mimic for each tenant a feeling of a dedicated control plane.
* **Control Plane**: Capsule can't mimic for each tenant a feeling of a dedicated control plane.

* **Custom Resource Definitions**: Capsule doesn't want to provide virtual cluster capabilities and it's sticking to the native Kubernetes user experience and design; rather, its focus is to provide a governance solution by focusing on resource optimization and security lockdown.

@@ -1,6 +1,6 @@
dependencies:
- name: capsule-proxy
repository: oci://ghcr.io/projectcapsule/charts
version: 0.9.1
digest: sha256:509f9d3d3c0181d9e5a410524d4767a687d8176620d24f7e460f354f18c0a5f8
generated: "2025-02-10T13:33:33.19014368Z"
version: 0.9.3
digest: sha256:057afc3b971a7ffe5ada7d358d759ab3383ffca61aed07e224f3f6c4338568ee
generated: "2025-04-26T05:29:13.486605681Z"
@@ -6,7 +6,7 @@ home: https://github.com/projectcapsule/capsule

icon: https://github.com/projectcapsule/capsule/raw/main/assets/logo/capsule_small.png
dependencies:
- name: capsule-proxy
version: 0.9.1
version: 0.9.3
repository: "oci://ghcr.io/projectcapsule/charts"
condition: proxy.enabled
alias: proxy

@@ -11,4 +11,4 @@ spec:

{{- include "capsule.webhooks.service" (dict "path" "/convert" "ctx" $) | nindent 8 }}
conversionReviewVersions:
- v1beta1
- v1beta2
- v1beta2

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1

kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.17.3
name: capsuleconfigurations.capsule.clastix.io
spec:
group: capsule.clastix.io

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1

kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.17.3
name: globaltenantresources.capsule.clastix.io
spec:
group: capsule.clastix.io

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1

kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.17.3
name: tenantresources.capsule.clastix.io
spec:
group: capsule.clastix.io

@@ -11,4 +11,4 @@ spec:

{{- include "capsule.webhooks.service" (dict "path" "/convert" "ctx" $) | nindent 8 }}
conversionReviewVersions:
- v1beta1
- v1beta2
- v1beta2

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1

kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.17.3
name: tenants.capsule.clastix.io
spec:
group: capsule.clastix.io

@@ -154,5 +154,3 @@ Capsule Webhook endpoint CA Bundle

caBundle: {{ $.Values.webhooks.service.caBundle -}}
{{- end -}}
{{- end -}}

@@ -28,7 +28,7 @@ spec:

- {{ include "capsule.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc
- {{ include "capsule.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc.cluster.local
{{- range .Values.certManager.additionalSANS }}
- {{ toYaml . }}
- {{ toYaml . }}
{{- end }}
issuerRef:
kind: Issuer

@@ -26,4 +26,3 @@ spec:

{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

@@ -13,5 +13,3 @@ crd-install-hook

{{- define "capsule.crds.regexReplace" -}}
{{- printf "%s" ($ | base | trimSuffix ".yaml" | regexReplaceAll "[_.]" "-") -}}
{{- end }}

@@ -53,4 +53,4 @@ data:

{{- end }}
{{ end }}
{{- end }}
{{- end }}
{{- end }}

@@ -47,7 +47,7 @@ spec:

{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.topologySpreadConstraints }}
topologySpreadConstraints:
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.priorityClassName }}

@@ -56,7 +56,7 @@ spec:

{{- with $Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "capsule.crds.name" . }}
containers:
- name: crds-hook

@@ -98,4 +98,4 @@ spec:

path: {{ $path | base }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

@@ -49,4 +49,4 @@ subjects:

- kind: ServiceAccount
name: {{ include "capsule.crds.name" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
{{- end }}

@@ -11,4 +11,4 @@ metadata:

labels:
app.kubernetes.io/component: {{ include "capsule.crds.component" . | quote }}
{{- include "capsule.labels" . | nindent 4 }}
{{- end }}
{{- end }}

@@ -31,7 +31,7 @@ webhooks:

- pods
scope: "Namespaced"
namespaceSelector:
{{- toYaml .namespaceSelector | nindent 4}}
{{- toYaml .namespaceSelector | nindent 4}}
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}

@@ -53,11 +53,11 @@ webhooks:

- persistentvolumeclaims
scope: "Namespaced"
namespaceSelector:
{{- toYaml .namespaceSelector | nindent 4}}
{{- toYaml .namespaceSelector | nindent 4}}
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.defaults.ingress }}
{{- with .Values.webhooks.hooks.defaults.ingress }}
- admissionReviewVersions:
- v1
clientConfig:

@@ -81,7 +81,7 @@ webhooks:

sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.namespaceOwnerReference }}
{{- with .Values.webhooks.hooks.namespaceOwnerReference }}
- admissionReviewVersions:
- v1
- v1beta1

@@ -9,4 +9,3 @@

{{- define "capsule.post-install.component" -}}
post-install-hook
{{- end }}

@@ -44,7 +44,7 @@ spec:

{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.topologySpreadConstraints }}
topologySpreadConstraints:
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.priorityClassName }}

@@ -59,7 +59,7 @@ spec:

- name: post-install
image: {{ include "capsule.jobsFullyQualifiedDockerImage" . }}
imagePullPolicy: {{ $Values.image.pullPolicy }}
command:
command:
- "sh"
- "-c"
- |

@@ -81,4 +81,4 @@ spec:

{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

@@ -41,4 +41,4 @@ subjects:

name: {{ include "capsule.post-install.name" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
{{- end }}
{{- end }}

@@ -12,4 +12,4 @@ metadata:

app.kubernetes.io/component: {{ include "capsule.post-install.component" . | quote }}
{{- include "capsule.labels" . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

@@ -12,4 +12,3 @@

{{- define "capsule.pre-delete.component" -}}
pre-delete-hook
{{- end }}

@@ -44,7 +44,7 @@ spec:

{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.topologySpreadConstraints }}
topologySpreadConstraints:
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $Values.priorityClassName }}

@@ -82,4 +82,4 @@ spec:

resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}

@@ -87,4 +87,4 @@ subjects:

- kind: ServiceAccount
name: {{ include "capsule.pre-delete.name" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
{{- end }}

@@ -11,4 +11,4 @@ metadata:

labels:
app.kubernetes.io/component: {{ include "capsule.pre-delete.component" . | quote }}
{{- include "capsule.labels" . | nindent 4 }}
{{- end }}
{{- end }}

@@ -29,7 +29,7 @@ spec:

{{- with .relabelings }}
relabelings: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
{{- end }}
jobLabel: app.kubernetes.io/name
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels: {{- toYaml . | nindent 4 }}

@@ -46,4 +46,3 @@ spec:

- {{ .Release.Namespace }}
{{- end }}
{{- end }}

@@ -274,4 +274,4 @@ webhooks:

sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- end }}
{{- end }}

@@ -16,5 +16,5 @@ const Configuration = {

helpUrl:
'https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md#commits',
};

module.exports = Configuration;

module.exports = Configuration;
@@ -35,6 +35,13 @@ type Processor struct {

client client.Client
}

func prepareAdditionalMetadata(m map[string]string) map[string]string {
	if m == nil {
		return make(map[string]string)
	}

	return m
}

func (r *Processor) HandlePruning(ctx context.Context, current, desired sets.Set[string]) (updateStatus bool) {
	log := ctrllog.FromContext(ctx)

@@ -118,8 +125,8 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant

objAnnotations, objLabels := map[string]string{}, map[string]string{}

if spec.AdditionalMetadata != nil {
	objAnnotations = spec.AdditionalMetadata.Annotations
	objLabels = spec.AdditionalMetadata.Labels
	objAnnotations = prepareAdditionalMetadata(spec.AdditionalMetadata.Annotations)
	objLabels = prepareAdditionalMetadata(spec.AdditionalMetadata.Labels)
}

objAnnotations[tenantLabel] = tnt.GetName()
(binary image changed: 29 KiB before and after)
@@ -29,7 +29,7 @@ $ export LAPTOP_HOST_IP=192.168.10.101

# Refer to here for more options: https://k3d.io/v4.4.8/usage/commands/k3d_cluster_create/
$ k3d cluster create k3s-capsule --servers 1 --agents 1 --no-lb --k3s-server-arg --tls-san=${LAPTOP_HOST_IP}

# Get Kubeconfig
# Get Kubeconfig
$ k3d kubeconfig get k3s-capsule > /tmp/k3s-capsule && export KUBECONFIG="/tmp/k3s-capsule"

# This will create a cluster with 1 server and 1 worker node

@@ -21,4 +21,4 @@ Please, refer to the [maintainers file](https://github.com/projectcapsule/capsul

## Roadmap Planning

Maintainers will share roadmap and release versions as milestones in GitHub.
Maintainers will share roadmap and release versions as milestones in GitHub.

@@ -5936,4 +5936,4 @@ Returns the observed state of the Tenant.

</td>
<td>false</td>
</tr></tbody>
</table>
</table>

@@ -10,7 +10,7 @@ You can use the [Capsule Helm Chart](https://github.com/projectcapsule/capsule/b

### Install with Helm Chart

Please, refer to the instructions reported in the Capsule Helm Chart [README](https://github.com/projectcapsule/capsule/blob/master/charts/capsule/README.md).
Please, refer to the instructions reported in the Capsule Helm Chart [README](https://github.com/projectcapsule/capsule/blob/master/charts/capsule/README.md).

## Create your first Tenant

@@ -90,7 +90,7 @@ $ kubectl create namespace oil-development

And operate with fully admin permissions:

```
$ kubectl -n oil-development run nginx --image=docker.io/nginx
$ kubectl -n oil-development run nginx --image=docker.io/nginx
$ kubectl -n oil-development get pods
```
@@ -78,22 +78,22 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the networkpolicies resources in the tenant namespace
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice get networkpolicies
|
||||
```bash
|
||||
kubectl --kubeconfig alice get networkpolicies
|
||||
NAME POD-SELECTOR AGE
|
||||
capsule-oil-0 <none> 7m5s
|
||||
```
|
||||
|
||||
As a tenant, checks for permissions to manage networkpolicy for each verb
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i get networkpolicies
|
||||
kubectl --kubeconfig alice auth can-i create networkpolicies
|
||||
kubectl --kubeconfig alice auth can-i update networkpolicies
|
||||
@@ -107,7 +107,7 @@ Each command must return 'yes'
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -145,14 +145,14 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner check for permissions to manage rolebindings for each verb
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i get rolebindings
|
||||
kubectl --kubeconfig alice auth can-i create rolebindings
|
||||
kubectl --kubeconfig alice auth can-i update rolebindings
|
||||
@@ -166,7 +166,7 @@ Each command must return 'yes'
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -204,14 +204,14 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, check for permissions to manage roles for each verb
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i get roles
|
||||
kubectl --kubeconfig alice auth can-i create roles
|
||||
kubectl --kubeconfig alice auth can-i update roles
|
||||
@@ -225,7 +225,7 @@ Each command must return 'yes'
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -261,12 +261,12 @@ EOF
|
||||
```
|
||||
|
||||
As cluster admin, run the following command to retrieve the list of non-namespaced resources
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin api-resources --namespaced=false
|
||||
```
|
||||
For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following command:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i <verb> <resource>
|
||||
```
|
||||
Each command must return `no`
|
||||
@@ -275,7 +275,7 @@ Each command must return `no`
|
||||
|
||||
It should, but it does not:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i create selfsubjectaccessreviews
|
||||
yes
|
||||
kubectl --kubeconfig alice auth can-i create selfsubjectrulesreviews
|
||||
@@ -286,7 +286,7 @@ yes
|
||||
|
||||
Any kubernetes user can create `SelfSubjectAccessReview` and `SelfSubjectRulesReviews` to checks whether he/she can act. First, two exceptions are not an issue.
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --anyuser auth can-i --list
|
||||
Resources Non-Resource URLs Resource Names Verbs
|
||||
selfsubjectaccessreviews.authorization.k8s.io [] [] [create]
|
||||
@@ -322,7 +322,7 @@ Role:
|
||||
Subjects:
|
||||
Kind Name Namespace
|
||||
---- ---- ---------
|
||||
Group capsule.clastix.io
|
||||
Group capsule.clastix.io
|
||||
|
||||
kubectl describe clusterrole capsule-namespace-provisioner
|
||||
Name: capsule-namespace-provisioner
|
||||
@@ -339,7 +339,7 @@ Capsule controls self-service namespace creation by limiting the number of names
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -397,15 +397,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the networkpolicies resources in the tenant namespace
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice get networkpolicies
|
||||
```bash
|
||||
kubectl --kubeconfig alice get networkpolicies
|
||||
NAME POD-SELECTOR AGE
|
||||
capsule-oil-0 <none> 7m5s
|
||||
capsule-oil-1 <none> 7m5s
|
||||
@@ -413,13 +413,13 @@ capsule-oil-1 <none> 7m5s
|
||||
|
||||
As tenant owner try to modify or delete one of the networkpolicies
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice delete networkpolicies capsule-oil-0
|
||||
```
|
||||
|
||||
You should receive an error message denying the edit/delete request
|
||||
|
||||
```bash
|
||||
```bash
|
||||
Error from server (Forbidden): networkpolicies.networking.k8s.io "capsule-oil-0" is forbidden:
|
||||
User "oil" cannot delete resource "networkpolicies" in API group "networking.k8s.io" in the namespace "oil-production"
|
||||
```
|
||||
@@ -434,7 +434,7 @@ metadata:
|
||||
name: hijacking
|
||||
namespace: oil-production
|
||||
spec:
|
||||
egress:
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
@@ -448,7 +448,7 @@ However, due to the additive nature of networkpolicies, the `DENY ALL` policy se
|
||||
|
||||
As tenant owner list RBAC permissions set by Capsule
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice get rolebindings
|
||||
NAME ROLE AGE
|
||||
capsule-oil-0-admin ClusterRole/admin 11h
|
||||
@@ -457,7 +457,7 @@ capsule-oil-1-capsule-namespace-deleter ClusterRole/capsule-namespace-deleter
|
||||
|
||||
As tenant owner, try to change/delete the rolebinding to escalate permissions
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice edit/delete rolebinding capsule-oil-0-admin
|
||||
```
|
||||
|
||||
@@ -495,7 +495,7 @@ EOF
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -551,14 +551,14 @@ EOF
|
||||
|
||||
As `oil` tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As `gas` tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig joe create ns gas-production
|
||||
kubectl --kubeconfig joe config set-context --current --namespace gas-production
|
||||
```
|
||||
@@ -566,8 +566,8 @@ kubectl --kubeconfig joe config set-context --current --namespace gas-production
|
||||
|
||||
As `oil` tenant owner, try to retrieve the resources in the `gas` tenant namespaces
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice get serviceaccounts --namespace gas-production
|
||||
```bash
|
||||
kubectl --kubeconfig alice get serviceaccounts --namespace gas-production
|
||||
```
|
||||
|
||||
You must receive an error message:
|
||||
@@ -579,8 +579,8 @@ User "oil" cannot list resource "serviceaccounts" in API group "" in the namespa
|
||||
|
||||
As `gas` tenant owner, try to retrieve the resources in the `oil` tenant namespaces
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig joe get serviceaccounts --namespace oil-production
|
||||
```bash
|
||||
kubectl --kubeconfig joe get serviceaccounts --namespace oil-production
|
||||
```
|
||||
|
||||
You must receive an error message:
|
||||
@@ -593,7 +593,7 @@ User "joe" cannot list resource "serviceaccounts" in API group "" in the namespa
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenants oil gas
|
||||
```
|
||||
|
||||
@@ -681,15 +681,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod and see new capabilities cannot be added in the tenant namespaces
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -713,7 +713,7 @@ You must have the pod blocked by PodSecurityPolicy.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -765,14 +765,14 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, check the permissions to modify/delete the quota in the tenant namespace:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i create quota
|
||||
kubectl --kubeconfig alice auth can-i update quota
|
||||
kubectl --kubeconfig alice auth can-i patch quota
|
||||
@@ -785,7 +785,7 @@ Each command must return 'no'
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -859,7 +859,7 @@ EOF
|
||||
|
||||
As `oil` tenant owner, run the following commands to create a namespace and resources in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
kubectl --kubeconfig alice run webserver --image nginx:latest
|
||||
@@ -868,7 +868,7 @@ kubectl --kubeconfig alice expose pod webserver --port 80
|
||||
|
||||
As `gas` tenant owner, run the following commands to create a namespace and resources in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig joe create ns gas-production
|
||||
kubectl --kubeconfig joe config set-context --current --namespace gas-production
|
||||
kubectl --kubeconfig joe run webserver --image nginx:latest
|
||||
@@ -877,14 +877,14 @@ kubectl --kubeconfig joe expose pod webserver --port 80
|
||||
|
||||
As `oil` tenant owner, verify you can access the service in `oil` tenant namespace but not in the `gas` tenant namespace
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice exec webserver -- curl http://webserver.oil-production.svc.cluster.local
|
||||
kubectl --kubeconfig alice exec webserver -- curl http://webserver.gas-production.svc.cluster.local
|
||||
```
|
||||
|
||||
Viceversa, as `gas` tenant owner, verify you can access the service in `gas` tenant namespace but not in the `oil` tenant namespace
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice exec webserver -- curl http://webserver.oil-production.svc.cluster.local
|
||||
kubectl --kubeconfig alice exec webserver -- curl http://webserver.gas-production.svc.cluster.local
|
||||
```
|
||||
@@ -893,7 +893,7 @@ kubectl --kubeconfig alice exec webserver -- curl http://webserver.gas-productio
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenants oil gas
|
||||
```
|
||||
|
||||
@@ -977,15 +977,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod or container that sets `allowPrivilegeEscalation=true` in its `securityContext`.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1007,7 +1007,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1094,15 +1094,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod or container that sets privileges in its `securityContext`.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1124,7 +1124,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1163,7 +1163,7 @@ EOF
|
||||
|
||||
As tenant owner, check if you can access the persistent volumes
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice auth can-i get persistentvolumes
|
||||
kubectl --kubeconfig alice auth can-i list persistentvolumes
|
||||
kubectl --kubeconfig alice auth can-i watch persistentvolumes
|
||||
@@ -1253,15 +1253,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod mounting the host IPC namespace.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1281,7 +1281,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1370,15 +1370,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod using `hostNetwork`
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1396,8 +1396,8 @@ EOF
|
||||
|
||||
As tenant owner, create a pod defining a container using `hostPort`
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1418,7 +1418,7 @@ In both the cases above, you must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1511,15 +1511,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod defining a volume of type `hostpath`.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1547,7 +1547,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1635,15 +1635,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod mounting the host PID namespace.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -1663,7 +1663,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -1705,7 +1705,7 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
@@ -1743,7 +1743,7 @@ NodePort service types are forbidden for the tenant: please, reach out to the sy
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -1789,20 +1789,20 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the configured quotas in the tenant namespace:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice get quota
|
||||
NAME AGE REQUEST LIMIT
|
||||
capsule-oil-0 23s persistentvolumeclaims: 0/100,
|
||||
pods: 0/100, services: 0/50,
|
||||
services.loadbalancers: 0/3,
|
||||
services.nodeports: 0/20
|
||||
services.nodeports: 0/20
|
||||
```
|
||||
|
||||
Make sure that a quota is configured for API objects: `PersistentVolumeClaim`, `LoadBalancer`, `NodePort`, `Pods`, etc
|
||||
@@ -1810,7 +1810,7 @@ Make sure that a quota is configured for API objects: `PersistentVolumeClaim`, `
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -1857,18 +1857,18 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the configured quotas in the tenant namespace:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice get quota
|
||||
NAME AGE REQUEST LIMIT
|
||||
capsule-oil-0 24s requests.cpu: 0/8, requests.memory: 0/16Gi limits.cpu: 0/8, limits.memory: 0/16Gi
|
||||
capsule-oil-1 24s requests.storage: 0/10Gi
|
||||
capsule-oil-0 24s requests.cpu: 0/8, requests.memory: 0/16Gi limits.cpu: 0/8, limits.memory: 0/16Gi
|
||||
capsule-oil-1 24s requests.storage: 0/10Gi
|
||||
```
|
||||
|
||||
Make sure that a quota is configured for CPU, memory, and storage resources.
|
||||
@@ -1876,7 +1876,7 @@ Make sure that a quota is configured for CPU, memory, and storage resources.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -1915,7 +1915,7 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
@@ -1948,7 +1948,7 @@ ImagePullPolicy IfNotPresent for container nginx is forbidden, use one of the fo
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
|
||||
@@ -1978,7 +1978,7 @@ spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
volumes:
|
||||
volumes:
|
||||
- 'persistentVolumeClaim'
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
@@ -2035,15 +2035,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod defining a volume of any of the core type except `PersistentVolumeClaim`. For example:
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -2071,7 +2071,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
@@ -2126,7 +2126,7 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
@@ -2160,7 +2160,7 @@ A valid Storage Class must be used, one of the following (delete-policy)
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete storageclass delete-policy
|
||||
```
|
||||
@@ -2252,15 +2252,15 @@ EOF
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod or container that does not set `runAsNonRoot` to `true` in its `securityContext`, and `runAsUser` must not be set to 0.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -2279,7 +2279,7 @@ You must have the pod blocked by `PodSecurityPolicy`.
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
|
||||
@@ -70,7 +70,7 @@ Here how it looks like when exposed through an Ingress Controller:
|
||||
kubectl ------>|:443 |--------->|:9001 |-------->|:6443 |
|
||||
+-----------+ +-----------+ +-----------+
|
||||
ingress-controller capsule-proxy kube-apiserver
|
||||
```
|
||||
```
|
||||
|
||||
## CLI flags
|
||||
|
||||
@@ -420,7 +420,7 @@ spec:
|
||||
operator: Exists
|
||||
values:
|
||||
- bronze
|
||||
- silver
|
||||
- silver
|
||||
```
|
||||
|
||||
In the Kubernetes cluster there could be additional RuntimeClass resources, some of them forbidden and not usable by the Tenant owner.
|
||||
|
||||
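For reference, a RuntimeClass is a plain cluster-scoped resource; a hedged sketch of one that would be forbidden for the tenant above (the `gvisor` name and `runsc` handler are just examples, assuming such a runtime is installed on the nodes):

```yaml
# A cluster-scoped RuntimeClass not listed in the tenant's allowed values:
# the tenant owner would not be able to schedule Pods with it.
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor
handler: runsc
```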
@@ -4,8 +4,8 @@ Reference document for Capsule Operator configuration
|
||||
|
||||
## Custom Resource Definition
|
||||
|
||||
The Capsule operator uses a Custom Resource Definition (CRD) for _Tenants_.
|
||||
Tenants are cluster-wide resources, so you need cluster-level permissions to work with tenants.
|
||||
The Capsule operator uses a Custom Resource Definition (CRD) for _Tenants_.
|
||||
Tenants are cluster-wide resources, so you need cluster-level permissions to work with tenants.
|
||||
You can learn about tenant CRDs in the following [section](./crds-apis)
|
||||
|
||||
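For orientation, a minimal Tenant manifest looks roughly like the sketch below (the tenant name `oil` and the owner `alice` are illustrative):

```yaml
# Minimal Tenant: cluster-scoped, with a single owner.
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - name: alice
    kind: User
```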
## Capsule Configuration
|
||||
@@ -39,7 +39,7 @@ Option | Description
|
||||
`.metadata.annotations.capsule.clastix.io/validating-webhook-configuration-name` | Set the ValidatingWebhookConfiguration name | `validating-webhook-configuration-name`
|
||||
|
||||
Upon installation using Kustomize or Helm, a `capsule-default` resource will be created.
|
||||
The reference to this configuration is managed by the CLI flag `--configuration-name`.
|
||||
The reference to this configuration is managed by the CLI flag `--configuration-name`.
|
||||
|
||||
## Capsule Permissions
|
||||
|
||||
@@ -105,4 +105,4 @@ capsule-system secret/capsule-tls
|
||||
capsule-system service/capsule-controller-manager-metrics-service
|
||||
capsule-system service/capsule-webhook-service
|
||||
capsule-system deployment.apps/capsule-controller-manager
|
||||
```
|
||||
```
|
||||
|
||||
@@ -7,7 +7,7 @@ Capsule is a framework to implement multi-tenant and policy-driven scenarios in
|
||||
* ***Bill***: the cluster administrator from the operations department of _Acme Corp_.
|
||||
|
||||
* ***Alice***: the project leader in the _Oil_ & _Gas_ departments. She is responsible for a team made up of different job roles: e.g. developers, administrators, SRE engineers, etc.
|
||||
|
||||
|
||||
* ***Joe***: works as a lead developer of a distributed team in Alice's organization.
|
||||
|
||||
* ***Bob***: is the head of engineering for the _Water_ department, the main and historical line of business at _Acme Corp_.
|
||||
@@ -54,7 +54,7 @@ Alice can log in using her credentials and check if she can create a namespace
|
||||
```
|
||||
kubectl auth can-i create namespaces
|
||||
yes
|
||||
```
|
||||
```
|
||||
|
||||
or even delete the namespace
|
||||
|
||||
@@ -186,7 +186,7 @@ kubectl patch capsuleconfigurations default \
|
||||
--type=merge
|
||||
```
|
||||
|
||||
> Please pay attention when setting a service account as the tenant owner. Make sure you're not using the group `system:serviceaccounts` or the group `system:serviceaccounts:{capsule-namespace}` as a Capsule group, otherwise you'll create a short circuit in the Capsule controller, since Capsule itself is controlled by a service account.
|
||||
> Please pay attention when setting a service account as the tenant owner. Make sure you're not using the group `system:serviceaccounts` or the group `system:serviceaccounts:{capsule-namespace}` as a Capsule group, otherwise you'll create a short circuit in the Capsule controller, since Capsule itself is controlled by a service account.
|
||||
|
||||
### Roles assigned to Tenant Owners
|
||||
|
||||
@@ -324,7 +324,7 @@ capsule-oil-3-prometheus-servicemonitors-viewer ClusterRole/prometheus-service
|
||||
```
|
||||
|
||||
### Assign additional Role Bindings
|
||||
The tenant owner acts as admin of tenant namespaces. Other users can operate inside the tenant namespaces with different levels of permissions and authorizations.
|
||||
The tenant owner acts as admin of tenant namespaces. Other users can operate inside the tenant namespaces with different levels of permissions and authorizations.
|
||||
|
||||
Assuming the cluster admin creates:
|
||||
|
||||
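As a hedged sketch, an additional Role Binding can be granted at the tenant level roughly like this (the `secret-reader` ClusterRole and the `oil-developers` group are hypothetical names):

```yaml
# Sketch: bind an existing ClusterRole to a group inside all tenant namespaces.
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - name: alice
    kind: User
  additionalRoleBindings:
  - clusterRoleName: 'secret-reader'
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: oil-developers
```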
@@ -378,7 +378,7 @@ The enforcement of this naming convention is optional and can be controlled by t
|
||||
Alice can deploy any resource in any of the namespaces
|
||||
|
||||
```
|
||||
kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
@@ -643,7 +643,7 @@ metadata:
|
||||
...
|
||||
```
|
||||
|
||||
When the aggregate usage for all namespaces crosses the hard quota, then the native `ResourceQuota` Admission Controller in Kubernetes denies Alice's request to create resources exceeding the quota:
|
||||
When the aggregate usage for all namespaces crosses the hard quota, then the native `ResourceQuota` Admission Controller in Kubernetes denies Alice's request to create resources exceeding the quota:
|
||||
|
||||
```
|
||||
kubectl -n oil-development create deployment nginx --image nginx:latest --replicas 10
|
||||
@@ -662,7 +662,7 @@ nginx-55649fd747-mlhlq 1/1 Running 0 12s
|
||||
nginx-55649fd747-t48s5 1/1 Running 0 7s
|
||||
```
|
||||
|
||||
and
|
||||
and
|
||||
|
||||
```
|
||||
kubectl -n oil-production get pods
|
||||
@@ -721,7 +721,7 @@ spec:
|
||||
```
|
||||
|
||||
Limits will be inherited by all the namespaces created by Alice. In our case, when Alice creates the namespace `oil-production`, Capsule creates the following:
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: LimitRange
|
||||
@@ -791,7 +791,7 @@ no
|
||||
|
||||
## Assign Pod Priority Classes
|
||||
|
||||
Pods can have priority. Priority indicates the importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the scheduler tries to preempt (evict) lower priority Pods to make scheduling of the pending Pod possible. See [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/).
|
||||
Pods can have priority. Priority indicates the importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the scheduler tries to preempt (evict) lower priority Pods to make scheduling of the pending Pod possible. See [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/).
|
||||
|
||||
In a multi-tenant cluster, not all users can be trusted, as a tenant owner could create Pods at the highest possible priorities, causing other Pods to be evicted/not get scheduled.
|
||||
|
||||
@@ -819,7 +819,7 @@ EOF
|
||||
With the said Tenant specification, Alice can create a Pod resource if `spec.priorityClassName` equals to:
|
||||
|
||||
- `custom`
|
||||
- `tier-gold`, `tier-silver`, or `tier-bronze`, since these match the allowed regex.
|
||||
- `tier-gold`, `tier-silver`, or `tier-bronze`, since these match the allowed regex.
|
||||
- Any PriorityClass which has the label `env` with the value `production`
|
||||
|
||||
If a Pod is going to use a non-allowed _Priority Class_, it will be rejected by the Validation Webhook enforcing it.
|
||||
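For example, a Pod using one of the allowed classes would be admitted; this sketch assumes a PriorityClass named `tier-gold` exists in the cluster:

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: oil-production
spec:
  priorityClassName: tier-gold
  containers:
  - name: nginx
    image: nginx:latest
EOF
```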
@@ -872,7 +872,7 @@ If a Pod has no value for `spec.priorityClassName`, the default value for Priori
|
||||
## Assign Pod Runtime Classes
|
||||
|
||||
Pods can be assigned different runtime classes. With the assigned runtime class you can control which Container Runtime Interface (CRI) is used for each pod.
|
||||
See [Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/runtime-class/) for more information.
|
||||
See [Kubernetes documentation](https://kubernetes.io/docs/concepts/containers/runtime-class/) for more information.
|
||||
|
||||
To prevent misuses of Pod Runtime Classes, Bill, the cluster admin, can enforce the allowed Pod Runtime Class at tenant level:
|
||||
|
||||
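A hedged sketch of what such a restriction could look like, assuming the Tenant spec exposes a `runtimeClasses` selector analogous to the priority and ingress class selectors shown elsewhere in this guide (the `legacy` name and `hardened-.*` regex are illustrative):

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - name: alice
    kind: User
  # Only these runtime classes would be usable by the tenant owner (assumed field shape).
  runtimeClasses:
    allowed:
    - legacy
    allowedRegex: "^hardened-.*$"
EOF
```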
@@ -968,7 +968,7 @@ no
|
||||
## Assign Ingress Classes
|
||||
An Ingress Controller is used in Kubernetes to publish services and applications outside of the cluster. An Ingress Controller can be provisioned to accept only Ingresses with a given Ingress Class.
|
||||
|
||||
Bill can assign a set of dedicated Ingress Classes to the `oil` tenant to force the applications in the `oil` tenant to be published only by the assigned Ingress Controller:
|
||||
Bill can assign a set of dedicated Ingress Classes to the `oil` tenant to force the applications in the `oil` tenant to be published only by the assigned Ingress Controller:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
@@ -1071,14 +1071,14 @@ EOF
|
||||
If an Ingress has no value for `spec.ingressClassName` or `metadata.annotations."kubernetes.io/ingress.class"`, the `tenant-default` IngressClass is automatically applied to the Ingress resource.
|
||||
|
||||
> This feature allows specifying a custom default value on a Tenant basis, bypassing the global cluster default (with the annotation `metadata.annotations.ingressclass.kubernetes.io/is-default-class=true`) that acts only at the cluster level.
|
||||
>
|
||||
>
|
||||
> More information: [Default IngressClass](https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class)
|
||||
|
||||
**Note**: This feature is offered only by API type `IngressClass` in group `networking.k8s.io` version `v1`.
|
||||
However, the `Ingress` resource is supported in `networking.k8s.io/v1` and `networking.k8s.io/v1beta1`.
|
||||
|
||||
## Assign Ingress Hostnames
|
||||
Bill can control ingress hostnames in the `oil` tenant to force the applications to be published only using the given hostname or set of hostnames:
|
||||
Bill can control ingress hostnames in the `oil` tenant to force the applications to be published only using the given hostname or set of hostnames:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
@@ -1316,7 +1316,7 @@ To meet this requirement, Bill needs to define network policies that deny pods b
|
||||
> 1. providing a restricted role rather than the default `admin` one
|
||||
> 2. using Calico's `GlobalNetworkPolicy`, or Cilium's `CiliumClusterwideNetworkPolicy` which are defined at the cluster-level, thus creating an order of packet filtering.
|
||||
|
||||
Also, Bill can make sure pods belonging to a tenant namespace cannot access other network infrastructures like cluster nodes, load balancers, and virtual machines running other services.
|
||||
Also, Bill can make sure pods belonging to a tenant namespace cannot access other network infrastructures like cluster nodes, load balancers, and virtual machines running other services.
|
||||
|
||||
Bill can set network policies in the tenant manifest, according to the requirements:
|
||||
|
||||
@@ -1340,7 +1340,7 @@ spec:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 192.168.0.0/16
|
||||
- 192.168.0.0/16
|
||||
ingress:
|
||||
- from:
|
||||
- namespaceSelector:
|
||||
@@ -1417,7 +1417,7 @@ Bill is a cluster admin providing a Container as a Service platform using shared
|
||||
|
||||
Alice, a Tenant Owner, can start containers using private images: according to the Kubernetes architecture, the `kubelet` will download the image layers into its local cache.
|
||||
|
||||
Bob, an attacker, could try to schedule a Pod on the same node where Alice is running her Pods backed by private images: by using `ImagePullPolicy=IfNotPresent` he could start new Pods from those images, even without the required authentication, since the image is already cached on the node.
|
||||
Bob, an attacker, could try to schedule a Pod on the same node where Alice is running her Pods backed by private images: by using `ImagePullPolicy=IfNotPresent` he could start new Pods from those images, even without the required authentication, since the image is already cached on the node.
|
||||
|
||||
To avoid this kind of attack, Bill, the cluster admin, can restrict Alice, the tenant owner, to starting her Pods only with the allowed values for `ImagePullPolicy`, forcing the `kubelet` to check the registry authorization first.
|
||||
|
||||
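A hedged sketch of such an enforcement, assuming the Tenant spec field `imagePullPolicies` (the `oil` tenant and owner `alice` are the same illustrative names used above):

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - name: alice
    kind: User
  # Only Always is accepted, so the kubelet re-checks registry credentials on every start.
  imagePullPolicies:
  - Always
EOF
```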
@@ -1605,7 +1605,7 @@ spec:
|
||||
## Assign Additional Metadata
|
||||
The cluster admin can _"taint"_ the namespaces created by tenant owners with additional metadata as labels and annotations. There is no specific semantic assigned to these labels and annotations: they will be assigned to the namespaces in the tenant as they are created. This can help the cluster admin to implement specific use cases, for example, allowing only a given tenant to be backed up by a backup service.
|
||||
|
||||
Assigns additional labels and annotations to all namespaces created in the `oil` tenant:
|
||||
Assigns additional labels and annotations to all namespaces created in the `oil` tenant:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
@@ -1655,7 +1655,7 @@ status:
|
||||
|
||||
Additionally, the cluster admin can _"taint"_ the services created by the tenant owners with additional metadata as labels and annotations.
|
||||
|
||||
Assigns additional labels and annotations to all services created in the `oil` tenant:
|
||||
Assigns additional labels and annotations to all services created in the `oil` tenant:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
@@ -1687,11 +1687,11 @@ metadata:
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
run: nginx
|
||||
type: ClusterIP
|
||||
type: ClusterIP
|
||||
```
|
||||
|
||||
## Cordon a Tenant
|
||||
@@ -1849,7 +1849,7 @@ Doing this, Alice will not be able to use `water.acme.com`, being the tenant own
|
||||
|
||||
## Deny labels and annotations on Namespaces
|
||||
|
||||
By default, Capsule allows tenant owners to add and modify any label or annotation on their namespaces.
|
||||
By default, Capsule allows tenant owners to add and modify any label or annotation on their namespaces.
|
||||
|
||||
But there are some scenarios when tenant owners should not have the ability to add or modify specific labels or annotations (for example, labels used in [Kubernetes network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) which are added by the cluster administrator).
|
||||
|
||||
@@ -1867,7 +1867,7 @@ spec:
|
||||
denied:
|
||||
- foo.acme.net
|
||||
- bar.acme.net
|
||||
deniedRegex: .*.acme.net
|
||||
deniedRegex: .*.acme.net
|
||||
forbiddenLabels:
|
||||
denied:
|
||||
- foo.acme.net
|
||||
@@ -1883,7 +1883,7 @@ EOF
|
||||
|
||||
When using `capsule` together with [capsule-proxy](https://github.com/clastix/capsule-proxy), Bill can allow Tenant Owners to [modify Nodes](/docs/proxy/overview).
|
||||
|
||||
By default, it will allow tenant owners to add and modify any label or annotation on their nodes.
|
||||
By default, it will allow tenant owners to add and modify any label or annotation on their nodes.
|
||||
|
||||
But there are some scenarios when tenant owners should not have the ability to add or modify specific labels or annotations (some labels or annotations must be protected from modification, for example those set by `cloud-providers` or `autoscalers`).
|
||||
|
||||
@@ -1894,7 +1894,7 @@ kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta2
|
||||
kind: CapsuleConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
name: default
|
||||
spec:
|
||||
nodeMetadata:
|
||||
forbiddenAnnotations:
|
||||
@@ -1923,7 +1923,7 @@ EOF
|
||||
|
||||
## Protecting tenants from deletion
|
||||
|
||||
Sometimes it is important to protect business-critical tenants from accidental deletion.
|
||||
Sometimes it is important to protect business-critical tenants from accidental deletion.
|
||||
This can be achieved by toggling the `preventDeletion` specification key on the tenant:
|
||||
|
||||
```yaml
|
||||
|
||||
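# The example manifest for this snippet is a hedged sketch: it assumes the
# v1beta2 Tenant API and an illustrative tenant named `oil` with owner `alice`.
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - name: alice
    kind: User
  # Deletion requests for this Tenant will be rejected while this is true.
  preventDeletion: true
```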
@@ -258,7 +258,7 @@ patches:
|
||||
name: "(kustomize-controller|helm-controller)"
|
||||
```
|
||||
|
||||
This way, tenants can't have Flux apply their Reconciliation resources with Flux's privileged Service Accounts simply by omitting `spec.ServiceAccountName` on them.
|
||||
This way, tenants can't have Flux apply their Reconciliation resources with Flux's privileged Service Accounts simply by omitting `spec.ServiceAccountName` on them.
|
||||
|
||||
At the same time, at the resource level in the privileged space, we can still specify a privileged ServiceAccount, and its reconciliation requests won't pass through Capsule validation:
|
||||
|
||||
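A hedged sketch of such a privileged-space Reconciliation resource, reusing the `kustomize-controller` ServiceAccount referenced in the Flux patches in this guide (paths and intervals are illustrative):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: flux-system
  namespace: flux-system
spec:
  # Privileged SA in the same (privileged) namespace: not subject to Capsule validation.
  serviceAccountName: kustomize-controller
  interval: 10m
  path: ./clusters/my-cluster
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
```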
@@ -279,7 +279,7 @@ spec:
|
||||
#### Kubeconfig
|
||||
|
||||
We also need to specify, on the Tenant's Reconciliation resources, the `Secret` with a **`kubeconfig`** configured to use the **Capsule Proxy** as the API server, in order to give the Tenant GitOps Reconciler the ability to list cluster-level resources.
|
||||
The `kubeconfig` would also specify the Tenant GitOps Reconciler SA token as the token.
|
||||
The `kubeconfig` would also specify the Tenant GitOps Reconciler SA token as the token.
|
||||
|
||||
For example:
|
||||
|
||||
@@ -293,7 +293,7 @@ spec:
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: gitops-reconciler-kubeconfig
|
||||
key: kubeconfig
|
||||
key: kubeconfig
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: my-tenant
|
||||
@@ -320,14 +320,14 @@ patches:
|
||||
- patch: |
|
||||
- op: add
|
||||
path: /spec/template/spec/containers/0/args/0
|
||||
value: --no-cross-namespace-refs=true
|
||||
value: --no-cross-namespace-refs=true
|
||||
target:
|
||||
kind: Deployment
|
||||
name: "(kustomize-controller|helm-controller|notification-controller|image-reflector-controller|image-automation-controller)"
|
||||
- patch: |
|
||||
- op: add
|
||||
path: /spec/template/spec/containers/0/args/-
|
||||
value: --no-remote-bases=true
|
||||
value: --no-remote-bases=true
|
||||
target:
|
||||
kind: Deployment
|
||||
name: "kustomize-controller"
|
||||
@@ -341,7 +341,7 @@ patches:
|
||||
- patch: |
|
||||
- op: add
|
||||
path: /spec/serviceAccountName
|
||||
value: kustomize-controller
|
||||
value: kustomize-controller
|
||||
target:
|
||||
kind: Kustomization
|
||||
name: "flux-system"
|
||||
@@ -435,7 +435,7 @@ this is the required set of resources to setup a Tenant:
|
||||
```
|
||||
- `Secret` with `kubeconfig` for the Tenant GitOps Reconciler with Capsule Proxy as `kubeconfig.server` and the SA token as `kubeconfig.token`.
|
||||
> This is supported only with Service Account static tokens.
|
||||
- Flux Source and Reconciliation resources that refer to the Tenant desired state. This typically points to a specific path inside a dedicated Git repository, where the tenant's root configuration resides:
|
||||
- Flux Source and Reconciliation resources that refer to the Tenant desired state. This typically points to a specific path inside a dedicated Git repository, where the tenant's root configuration resides:
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: GitRepository
|
||||
@@ -497,7 +497,7 @@ spec:
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: gitops-reconciler-kubeconfig
|
||||
key: kubeconfig
|
||||
key: kubeconfig
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: my-tenant
|
||||
@@ -539,7 +539,7 @@ Furthermore, let's see if there are other vulnerabilities we are able to protect
|
||||
|
||||
##### Impersonate privileged SA
|
||||
|
||||
Then, what if a tenant tries to escalate by using one of the Flux controllers' privileged `ServiceAccount`s?
|
||||
Then, what if a tenant tries to escalate by using one of the Flux controllers' privileged `ServiceAccount`s?
|
||||
|
||||
As `spec.ServiceAccountName` on a Reconciliation resource cannot reference Service Accounts across namespaces, tenants can have Flux apply their own resources only with ServiceAccounts that reside in their own Namespaces. That is, the Namespace of the ServiceAccount and the Namespace of the Reconciliation resource must match.
|
||||
|
||||
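In other words, a tenant-side Reconciliation resource ends up looking roughly like this hedged sketch, with the ServiceAccount and the Kustomization in the same tenant Namespace (`my-tenant` and `gitops-reconciler` follow the naming used in this guide; the path is illustrative):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: my-tenant-apps
  namespace: my-tenant
spec:
  # SA and Kustomization share the Namespace, so no cross-namespace escalation is possible.
  serviceAccountName: gitops-reconciler
  interval: 10m
  path: ./staging
  prune: true
  sourceRef:
    kind: GitRepository
    name: my-tenant
```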
@@ -558,13 +558,13 @@ Now let's go on with the practical part.
|
||||
He could try to use a privileged `ServiceAccount` by changing the ownership of a privileged Namespace, so that he could create a Reconciliation resource there using the privileged SA.
|
||||
This is not permitted, as he can't patch Namespaces which have not been created by him: the Capsule request validation would not pass.
|
||||
|
||||
For other protections against threats in this multi-tenancy scenario please see the Capsule [Multi-Tenancy Benchmark](/docs/general/mtb).
|
||||
For other protections against threats in this multi-tenancy scenario please see the Capsule [Multi-Tenancy Benchmark](/docs/general/mtb).
|
||||
|
||||
## References
|
||||
- https://fluxcd.io/docs/installation/#multi-tenancy-lockdown
|
||||
- https://fluxcd.io/blog/2022/05/may-2022-security-announcement/
|
||||
- https://github.com/clastix/capsule-proxy/issues/218
|
||||
- https://github.com/projectcapsule/capsule/issues/528
|
||||
- https://github.com/projectcapsule/capsule/issues/528
|
||||
- https://github.com/clastix/flux2-capsule-multi-tenancy
|
||||
- https://github.com/fluxcd/flux2-multi-tenancy
|
||||
- https://fluxcd.io/docs/guides/repository-structure/
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
# Guides
|
||||
Guides and tutorials on how to integrate Capsule in your Kubernetes environment.
|
||||
Guides and tutorials on how to integrate Capsule in your Kubernetes environment.
|
||||
|
||||
@@ -39,7 +39,7 @@ EOF
|
||||
```
|
||||
|
||||
> Values used for the config:
|
||||
>
|
||||
>
|
||||
> - **OIDC_CLIENT_ID**: the Keycloak client ID (name) which is used by the Kubernetes API Server for authorization
|
||||
> - **OIDC_CLIENT_SECRET**: secret for the client (`OIDC_CLIENT_ID`). You can see it from the Keycloak UI -> Clients -> `OIDC_CLIENT_ID` -> Credentials
|
||||
> - **DASHBOARD_URL**: the Kubernetes Dashboard URL
|
||||
|
||||
@@ -137,4 +137,4 @@ to create namespace. Switch to a new terminal and try to create a namespace as u
|
||||
unset AWS_ACCESS_KEY_ID
|
||||
unset AWS_SECRET_ACCESS_KEY
|
||||
kubectl create namespace test --kubeconfig="kubeconfig-alice.conf"
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Capsule on Azure Kubernetes Service
|
||||
|
||||
This reference implementation introduces the recommended starting (baseline) infrastructure architecture for implementing a multi-tenancy Azure AKS cluster using Capsule. See [CoAKS](https://github.com/clastix/coaks-baseline-architecture).
|
||||
This reference implementation introduces the recommended starting (baseline) infrastructure architecture for implementing a multi-tenancy Azure AKS cluster using Capsule. See [CoAKS](https://github.com/clastix/coaks-baseline-architecture).
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Capsule on Managed Kubernetes
|
||||
Capsule Operator can be easily installed on a Managed Kubernetes Service. Since you do not have access to the Kubernetes API Server, you should check with the provider of the service that:
|
||||
|
||||
- the default `cluster-admin` ClusterRole is accessible
|
||||
- the default `cluster-admin` ClusterRole is accessible
|
||||
- the following Admission Webhooks are enabled on the API Server:
|
||||
- PodNodeSelector
|
||||
- LimitRanger
|
||||
|
||||
@@ -32,7 +32,7 @@ $ curl -k -s https://${OIDC_ISSUER}/protocol/openid-connect/token \
|
||||
The result will include an `ACCESS_TOKEN`, a `REFRESH_TOKEN`, and an `ID_TOKEN`. The access token can generally be disregarded for Kubernetes: it would be used if the identity provider were managing roles and permissions for the users, but that is done in Kubernetes itself with RBAC. The id token is short-lived, while the refresh token has a longer expiration. The refresh token is used to fetch a new id token when the id token expires.
|
||||
|
||||
```json
|
||||
{
|
||||
{
|
||||
"access_token":"ACCESS_TOKEN",
|
||||
"refresh_token":"REFRESH_TOKEN",
|
||||
"id_token": "ID_TOKEN",
|
||||
@@ -133,4 +133,4 @@ $ kubectl --context alice-oidc@mycluster create namespace oil-development
|
||||
$ kubectl --context alice-oidc@mycluster create namespace gas-marketing
|
||||
```
|
||||
|
||||
> _Warning_: once your `ID_TOKEN` expires, the `kubectl` OIDC Authenticator will attempt to automatically refresh your `ID_TOKEN` using the `REFRESH_TOKEN`. In case the OIDC provider uses a self-signed CA certificate, make sure to specify it with the `idp-certificate-authority` option in your `kubeconfig` file, otherwise you won't be able to refresh the tokens.
|
||||
> _Warning_: once your `ID_TOKEN` expires, the `kubectl` OIDC Authenticator will attempt to automatically refresh your `ID_TOKEN` using the `REFRESH_TOKEN`. In case the OIDC provider uses a self-signed CA certificate, make sure to specify it with the `idp-certificate-authority` option in your `kubeconfig` file, otherwise you won't be able to refresh the tokens.
|
||||
|
||||
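A hedged sketch of the corresponding `kubeconfig` user entry, using the standard `kubectl` OIDC authenticator options (all values are placeholders):

```yaml
users:
- name: alice-oidc
  user:
    auth-provider:
      name: oidc
      config:
        idp-issuer-url: https://${OIDC_ISSUER}
        client-id: ${OIDC_CLIENT_ID}
        client-secret: ${OIDC_CLIENT_SECRET}
        id-token: ${ID_TOKEN}
        refresh-token: ${REFRESH_TOKEN}
        # Needed when the OIDC provider uses a self-signed CA certificate.
        idp-certificate-authority: /path/to/oidc-ca.crt
```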
@@ -188,7 +188,7 @@ spec:
|
||||
EOF
|
||||
```
|
||||
|
||||
All namespaces created by the tenant owner will inherit the Pod Security labels:
|
||||
All namespaces created by the tenant owner will inherit the Pod Security labels:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -255,4 +255,4 @@ kubectl --kubeconfig alice-oil.kubeconfig label ns oil-production \
|
||||
--overwrite
|
||||
|
||||
Error from server (Label pod-security.kubernetes.io/audit is forbidden for namespaces in the current Tenant ...
|
||||
```
|
||||
```
|
||||
|
||||
@@ -203,5 +203,3 @@ spec:
|
||||
## Cluster-wide resources and Rancher Shell interface
|
||||
|
||||
To use the Rancher Shell and cluster-wide resources as a tenant user, please follow [this guide](./capsule-proxy-rancher.md).
|
||||
|
||||
|
||||
|
||||
@@ -24,4 +24,3 @@ With Capsule Proxy users can also access cluster-wide resources, as configured b
|
||||
You can read in detail how the integration works and how to configure it, in the following guides.
|
||||
- [How to integrate Rancher Projects with Capsule Tenants](./capsule-proxy-rancher.md)
|
||||
- [How to enable cluster-wide resources and Rancher shell access](./capsule-proxy-rancher.md).
|
||||
|
||||
|
||||
@@ -37,4 +37,3 @@ Configure an OIDC authentication provider, with Client with issuer, return URLs
|
||||
1. In Rancher, as an administrator, set a custom role for the user with `get` permission on Cluster.
|
||||
1. In Rancher, as an administrator, add the Rancher user ID of the just-logged-in user as Owner of a `Tenant`.
|
||||
1. (optional) configure `proxySettings` for the `Tenant` to enable tenant users to access cluster-wide resources.
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ A minor bump has been requested due to some missing enums in the Tenant resource
|
||||
Using `kubectl` or Helm, scale down the Capsule controller manager: this is required to prevent the old Capsule version from processing objects that aren't yet installed as a CRD.
|
||||
|
||||
```
|
||||
helm upgrade -n capsule-system capsule --set "replicaCount=0"
|
||||
helm upgrade -n capsule-system capsule --set "replicaCount=0"
|
||||
```
|
||||
|
||||
## Patch the Tenant custom resource definition
|
||||
@@ -71,7 +71,7 @@ This will start the Operator with the latest changes, and perform the required s
|
||||
Using `kubectl` or Helm, scale down the Capsule controller manager: this is required to prevent the old Capsule version from processing objects that aren't yet installed as a CRD.
|
||||
|
||||
```
|
||||
helm upgrade -n capsule-system capsule --set "replicaCount=0"
|
||||
helm upgrade -n capsule-system capsule --set "replicaCount=0"
|
||||
```
|
||||
|
||||
> Ensure that all the Pods have been removed correctly.
|
||||
@@ -112,7 +112,7 @@ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/v0.2.1/charts
|
||||
>
|
||||
> ```bash
|
||||
> CUSTOM_NS="tenancy-operations"
|
||||
>
|
||||
>
|
||||
> for CR in capsuleconfigurations.capsule.clastix.io globaltenantresources.capsule.clastix.io tenantresources.capsule.clastix.io tenants.capsule.clastix.io; do
|
||||
> kubectl patch crd "${CR}" --type='json' -p=" [{'op': 'replace', 'path': '/spec/conversion/webhook/clientConfig/service/namespace', 'value': "${CUSTOM_NS}"}]"
|
||||
> done
|
||||
@@ -162,7 +162,7 @@ oil 3 0 alice User {"kubernete
|
||||
You should see all the previous Tenant resources converted into the new format and structure.
|
||||
|
||||
```
|
||||
$: kubectl get tenants.v1beta2.capsule.clastix.io
|
||||
$: kubectl get tenants.v1beta2.capsule.clastix.io
|
||||
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
|
||||
oil Active 3 0 {"kubernetes.io/os":"linux"} 3m38s
|
||||
```
|
||||
@@ -180,7 +180,7 @@ oil Active 3 0 {"kubernetes.io/os":"linux"}
|
||||
If you're using Helm as package manager, all the Operator resources such as Deployment, Service, Role Binding, etc. must be deleted.
|
||||
|
||||
```
|
||||
helm uninstall -n capsule-system capsule
|
||||
helm uninstall -n capsule-system capsule
|
||||
```
|
||||
|
||||
Ensure that everything has been removed correctly, especially the Secret resources.
|
||||
@@ -233,7 +233,7 @@ oil 3 0 alice User {"kubernete
|
||||
You should see all the previous Tenant resources converted into the new format and structure.
|
||||
|
||||
```
|
||||
$: kubectl get tenants.v1beta1.capsule.clastix.io
|
||||
$: kubectl get tenants.v1beta1.capsule.clastix.io
|
||||
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
|
||||
oil Active 3 0 {"kubernetes.io/os":"linux"} 3m38s
|
||||
```
|
||||
|
||||
@@ -9,9 +9,9 @@ Kubernetes introduces the _Namespace_ object type to create logical partitions o
|
||||
|
||||
## Entering Capsule
|
||||
|
||||
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.
|
||||
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.
|
||||
|
||||
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
|
||||
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
|
||||
|
||||
|
||||

|
||||
|
||||
@@ -1 +1 @@
|
||||
<svg fill="none" stroke="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 10"><path d="M15 1.2l-7 7-7-7" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
|
||||
<svg fill="none" stroke="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 10"><path d="M15 1.2l-7 7-7-7" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 192 B After Width: | Height: | Size: 193 B |
|
Before Width: | Height: | Size: 6.0 KiB After Width: | Height: | Size: 6.0 KiB |
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-github"><path d="M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22"></path></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-github"><path d="M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22"></path></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 504 B After Width: | Height: | Size: 505 B |
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-linkedin"><path d="M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"></path><rect x="2" y="9" width="4" height="12"></rect><circle cx="4" cy="4" r="2"></circle></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-linkedin"><path d="M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"></path><rect x="2" y="9" width="4" height="12"></rect><circle cx="4" cy="4" r="2"></circle></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 377 B After Width: | Height: | Size: 378 B |
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-search"><circle cx="11" cy="11" r="8"></circle><line x1="21" y1="21" x2="16.65" y2="16.65"></line></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-search"><circle cx="11" cy="11" r="8"></circle><line x1="21" y1="21" x2="16.65" y2="16.65"></line></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 308 B After Width: | Height: | Size: 309 B |
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-slack"><path d="M14.5 10c-.83 0-1.5-.67-1.5-1.5v-5c0-.83.67-1.5 1.5-1.5s1.5.67 1.5 1.5v5c0 .83-.67 1.5-1.5 1.5z"></path><path d="M20.5 10H19V8.5c0-.83.67-1.5 1.5-1.5s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"></path><path d="M9.5 14c.83 0 1.5.67 1.5 1.5v5c0 .83-.67 1.5-1.5 1.5S8 21.33 8 20.5v-5c0-.83.67-1.5 1.5-1.5z"></path><path d="M3.5 14H5v1.5c0 .83-.67 1.5-1.5 1.5S2 16.33 2 15.5 2.67 14 3.5 14z"></path><path d="M14 14.5c0-.83.67-1.5 1.5-1.5h5c.83 0 1.5.67 1.5 1.5s-.67 1.5-1.5 1.5h-5c-.83 0-1.5-.67-1.5-1.5z"></path><path d="M15.5 19H14v1.5c0 .83.67 1.5 1.5 1.5s1.5-.67 1.5-1.5-.67-1.5-1.5-1.5z"></path><path d="M10 9.5C10 8.67 9.33 8 8.5 8h-5C2.67 8 2 8.67 2 9.5S2.67 11 3.5 11h5c.83 0 1.5-.67 1.5-1.5z"></path><path d="M8.5 5H10V3.5C10 2.67 9.33 2 8.5 2S7 2.67 7 3.5 7.67 5 8.5 5z"></path></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-slack"><path d="M14.5 10c-.83 0-1.5-.67-1.5-1.5v-5c0-.83.67-1.5 1.5-1.5s1.5.67 1.5 1.5v5c0 .83-.67 1.5-1.5 1.5z"></path><path d="M20.5 10H19V8.5c0-.83.67-1.5 1.5-1.5s1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"></path><path d="M9.5 14c.83 0 1.5.67 1.5 1.5v5c0 .83-.67 1.5-1.5 1.5S8 21.33 8 20.5v-5c0-.83.67-1.5 1.5-1.5z"></path><path d="M3.5 14H5v1.5c0 .83-.67 1.5-1.5 1.5S2 16.33 2 15.5 2.67 14 3.5 14z"></path><path d="M14 14.5c0-.83.67-1.5 1.5-1.5h5c.83 0 1.5.67 1.5 1.5s-.67 1.5-1.5 1.5h-5c-.83 0-1.5-.67-1.5-1.5z"></path><path d="M15.5 19H14v1.5c0 .83.67 1.5 1.5 1.5s1.5-.67 1.5-1.5-.67-1.5-1.5-1.5z"></path><path d="M10 9.5C10 8.67 9.33 8 8.5 8h-5C2.67 8 2 8.67 2 9.5S2.67 11 3.5 11h5c.83 0 1.5-.67 1.5-1.5z"></path><path d="M8.5 5H10V3.5C10 2.67 9.33 2 8.5 2S7 2.67 7 3.5 7.67 5 8.5 5z"></path></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 976 B After Width: | Height: | Size: 977 B |
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-twitter"><path d="M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z"></path></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-twitter"><path d="M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z"></path></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 385 B After Width: | Height: | Size: 386 B |
|
Before Width: | Height: | Size: 5.3 KiB After Width: | Height: | Size: 5.3 KiB |
@@ -1 +1 @@
|
||||
<svg viewBox="0 0 305 269" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M112 56.2c0-4.5-.4-9 0-13.5 0-1.2 2.1-3.1 3.4-3.3 7-.8 14-1.4 21-1.8 2.5 0 3.6-1 4.4-3 2-5.2 4-10.3 6.3-15.4.9-1.9.6-2.8-1.2-4-4.4-3.2-5-8.5-1.6-12.1 3.7-4 8.9-4.1 12.6-.2 3.5 3.7 3.1 9-1.3 12.1-2 1.4-2.1 2.5-1.3 4.5 2.2 5 4 10.3 6.4 15.2a6.2 6.2 0 0 0 4.2 3c7.6.7 15.2 1 22.8 1.6 2.7.1 4.2 1.2 4.2 4.1.1 4 .5 7.9.7 11.9-1.5 0-3 .2-4.5 0-12.6-.7-37.5-2.6-37.5-2.6L112 56.2Z" fill="#274872"/><path d="M13.4 193.8c2.9-4.2 5.7-8.3 8.7-12.4.8-1.1 2.1-2 3.2-2.9 8.7 2.8 17.2 6 26 8.2 7.3 1.8 7.6 1.3 11.1-5.6 9.6-18.2 19-36.5 28.5-54.9 3.5-6.9 7-13.8 10.3-20.8 1.9-4 1-5.3-3.3-5.1-5 .2-10 .6-15.2 1 1.8-2.4 3.3-4.9 5.2-7 4.5-4.9 9.2-9.6 13.8-14.4h5.5l43.9-3.2-.3 2.5v145.2c-15.7.9-30.4 1-47.4-.6-16-1-30.7-3.5-46.6-5.5A119 119 0 0 1 17.6 207c-4.8-2.4-5.9-5.4-4.3-10.4.3-.9.1-1.8.1-2.7Z" fill="#5783AB"/><path d="M150.9 224.4V76.7c5.6.1 11.4 0 17 .3 11.4.8 22.7 1.9 34 2.8 2.7 3 5.5 6.1 8 9.3 3.5 4.2 6.7 8.5 10 12.7-6-.5-12-1.2-18.1-1.5-4.2-.2-5 1.2-3.1 4.8l29.6 57.8c4 7.8 8.2 15.5 12.3 23.3 1.4 2.8 3.6 3.9 6.6 3.2l34.5-8.6c1.8 3 3.5 6.2 5.4 9.1 1 1.4 2.2 2.6 3.2 3.9 0 .5-.1 1.2 0 1.6 3.4 6.3 0 10-5.7 12.6a140.8 140.8 0 0 1-44.6 11.4l-38.6 3.7c-6.3.6-12.5 1.5-18.8 1.8-4.5.3-9.2-.1-13.8-.3H151l-.1-.2Z" fill="#EAECEC"/><path d="m281.7 180.8-34.5 8.6c-3 .7-5.2-.4-6.6-3.2-4-7.8-8.3-15.5-12.3-23.3L198.7 105c-1.9-3.6-1-5 3-4.8 6.1.3 12.1 1 18.2 1.5 1.9.4 2.5.1 5 3.7 17.1 24.4 37 47.8 54.9 71.7.8 1 1.3 2.4 2 3.6Z" fill="#5783AB"/><path d="m23.5 180 33.8 8.2c3 .7 5.1-.3 6.6-3.2L76 161.7c10-19.2 17.3-37.2 26.4-56.7 1.8-3.5.7-5-3.4-4.7-6 .3-10.1.4-16.2 1a10 10 0 0 0-3.8 3.4c-18 23.7-35.8 47.6-53.7 71.5-.8 1.1-1.3 2.5-1.9 3.7Z" fill="#EAECEC"/><path d="M290.4 193.8c0 .5-.2 1.2 0 1.6 3.3 6.3-.2 10-5.8 12.6a140.8 140.8 0 0 1-44.5 11.4l-38.7 3.7c-6.3.7-13 1.3-19 1.3-4.6 0-9 .4-13.6.2H151l-.1-.2c-15.4.4-28.8.4-46.2-1.2-16-1.4-32-3-47.9-5A119 119 0 0 1 17.6 207c-4.8-2.4-5.9-5.4-4.3-10.5.3-.8.1-1.7.1-2.6C7 196.3 2.1 200.2.6 207.5a25 25 0 0 0 5.7 21.7c1.5 1.8 2.4 3.2.1 5.2-.7.6-.8 3.3-.2 4a51 51 0 0 0 22.4 12.3c2.4.6 4.1 0 4.6-2.9.1-.6 1.7-1.6 2.3-1.5l26 6.2c1.9.4 2.6 1 1.4 3-1.4 2.4-.4 3.9 2 5 8.7 3.5 17.9 3.9 27 4.3 2.2 0 4.5-1 3.5-4-1-3.1.5-3.1 2.8-3 8.6.8 17.2 1.5 25.8 2 3.1.2 4 1 2.4 4s-.3 4.8 2.9 4.8c14.8 0 29.6.2 44.4-.1 4.6 0 5.1-1.3 3.4-5.6-.8-2.1-1-2.9 1.8-3a646 646 0 0 0 27.4-2.2c2.5-.2 3 .4 2.6 2.6-.6 2.3 0 4 2.7 4.1 9.5.2 18.9-.5 27.8-4.3 2-.9 3-2.1 1.7-4.3-1.7-2.7 0-3.1 2.2-3.6a901 901 0 0 0 24.6-6c1.8-.4 3-.7 3.2 1.6.3 3 2.1 3.4 4.6 2.6 8-2.3 15.5-6 21.6-12 .6-.6 1-3 .5-3.3-3.2-2.7-1.4-4.7.5-7 8-9.6 9.5-27.9-8-34.3Z" fill="#274872"/><path d="M192.8 70.5a4896.2 4896.2 0 0 1-39.1-1h-2.5l-.6.1V52.1c12.6 1 24.6 2 37.2 2.7h4.8l.2 15.7Z" fill="#EAECEC"/><path d="M111.8 71c6.7-.3 10-.4 16.8-.5l19-.5h2.4s.6-.6.6-.4V52.1c-12.6 1-21.3 2-33.8 2.7H112l-.2 16.2Z" fill="#5783AB"/><path d="m196 70.7-4-.3-40-1h-.1l-40 1-4 .3c-6.2.5-6.8 1.1-6.7 7.2 0 .7.2 1.3.3 2h5.7c15-1 28.7-2.1 43.6-3.2l46 3.1c1.8.2 3.7.1 5.6.1.1-.7.3-1.3.3-2 .2-6-.4-6.7-6.7-7.2Z" fill="#274872"/></svg>
|
||||
<svg viewBox="0 0 305 269" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M112 56.2c0-4.5-.4-9 0-13.5 0-1.2 2.1-3.1 3.4-3.3 7-.8 14-1.4 21-1.8 2.5 0 3.6-1 4.4-3 2-5.2 4-10.3 6.3-15.4.9-1.9.6-2.8-1.2-4-4.4-3.2-5-8.5-1.6-12.1 3.7-4 8.9-4.1 12.6-.2 3.5 3.7 3.1 9-1.3 12.1-2 1.4-2.1 2.5-1.3 4.5 2.2 5 4 10.3 6.4 15.2a6.2 6.2 0 0 0 4.2 3c7.6.7 15.2 1 22.8 1.6 2.7.1 4.2 1.2 4.2 4.1.1 4 .5 7.9.7 11.9-1.5 0-3 .2-4.5 0-12.6-.7-37.5-2.6-37.5-2.6L112 56.2Z" fill="#274872"/><path d="M13.4 193.8c2.9-4.2 5.7-8.3 8.7-12.4.8-1.1 2.1-2 3.2-2.9 8.7 2.8 17.2 6 26 8.2 7.3 1.8 7.6 1.3 11.1-5.6 9.6-18.2 19-36.5 28.5-54.9 3.5-6.9 7-13.8 10.3-20.8 1.9-4 1-5.3-3.3-5.1-5 .2-10 .6-15.2 1 1.8-2.4 3.3-4.9 5.2-7 4.5-4.9 9.2-9.6 13.8-14.4h5.5l43.9-3.2-.3 2.5v145.2c-15.7.9-30.4 1-47.4-.6-16-1-30.7-3.5-46.6-5.5A119 119 0 0 1 17.6 207c-4.8-2.4-5.9-5.4-4.3-10.4.3-.9.1-1.8.1-2.7Z" fill="#5783AB"/><path d="M150.9 224.4V76.7c5.6.1 11.4 0 17 .3 11.4.8 22.7 1.9 34 2.8 2.7 3 5.5 6.1 8 9.3 3.5 4.2 6.7 8.5 10 12.7-6-.5-12-1.2-18.1-1.5-4.2-.2-5 1.2-3.1 4.8l29.6 57.8c4 7.8 8.2 15.5 12.3 23.3 1.4 2.8 3.6 3.9 6.6 3.2l34.5-8.6c1.8 3 3.5 6.2 5.4 9.1 1 1.4 2.2 2.6 3.2 3.9 0 .5-.1 1.2 0 1.6 3.4 6.3 0 10-5.7 12.6a140.8 140.8 0 0 1-44.6 11.4l-38.6 3.7c-6.3.6-12.5 1.5-18.8 1.8-4.5.3-9.2-.1-13.8-.3H151l-.1-.2Z" fill="#EAECEC"/><path d="m281.7 180.8-34.5 8.6c-3 .7-5.2-.4-6.6-3.2-4-7.8-8.3-15.5-12.3-23.3L198.7 105c-1.9-3.6-1-5 3-4.8 6.1.3 12.1 1 18.2 1.5 1.9.4 2.5.1 5 3.7 17.1 24.4 37 47.8 54.9 71.7.8 1 1.3 2.4 2 3.6Z" fill="#5783AB"/><path d="m23.5 180 33.8 8.2c3 .7 5.1-.3 6.6-3.2L76 161.7c10-19.2 17.3-37.2 26.4-56.7 1.8-3.5.7-5-3.4-4.7-6 .3-10.1.4-16.2 1a10 10 0 0 0-3.8 3.4c-18 23.7-35.8 47.6-53.7 71.5-.8 1.1-1.3 2.5-1.9 3.7Z" fill="#EAECEC"/><path d="M290.4 193.8c0 .5-.2 1.2 0 1.6 3.3 6.3-.2 10-5.8 12.6a140.8 140.8 0 0 1-44.5 11.4l-38.7 3.7c-6.3.7-13 1.3-19 1.3-4.6 0-9 .4-13.6.2H151l-.1-.2c-15.4.4-28.8.4-46.2-1.2-16-1.4-32-3-47.9-5A119 119 0 0 1 17.6 207c-4.8-2.4-5.9-5.4-4.3-10.5.3-.8.1-1.7.1-2.6C7 196.3 2.1 200.2.6 207.5a25 25 0 0 0 5.7 21.7c1.5 1.8 2.4 3.2.1 5.2-.7.6-.8 3.3-.2 4a51 51 0 0 0 22.4 12.3c2.4.6 4.1 0 4.6-2.9.1-.6 1.7-1.6 2.3-1.5l26 6.2c1.9.4 2.6 1 1.4 3-1.4 2.4-.4 3.9 2 5 8.7 3.5 17.9 3.9 27 4.3 2.2 0 4.5-1 3.5-4-1-3.1.5-3.1 2.8-3 8.6.8 17.2 1.5 25.8 2 3.1.2 4 1 2.4 4s-.3 4.8 2.9 4.8c14.8 0 29.6.2 44.4-.1 4.6 0 5.1-1.3 3.4-5.6-.8-2.1-1-2.9 1.8-3a646 646 0 0 0 27.4-2.2c2.5-.2 3 .4 2.6 2.6-.6 2.3 0 4 2.7 4.1 9.5.2 18.9-.5 27.8-4.3 2-.9 3-2.1 1.7-4.3-1.7-2.7 0-3.1 2.2-3.6a901 901 0 0 0 24.6-6c1.8-.4 3-.7 3.2 1.6.3 3 2.1 3.4 4.6 2.6 8-2.3 15.5-6 21.6-12 .6-.6 1-3 .5-3.3-3.2-2.7-1.4-4.7.5-7 8-9.6 9.5-27.9-8-34.3Z" fill="#274872"/><path d="M192.8 70.5a4896.2 4896.2 0 0 1-39.1-1h-2.5l-.6.1V52.1c12.6 1 24.6 2 37.2 2.7h4.8l.2 15.7Z" fill="#EAECEC"/><path d="M111.8 71c6.7-.3 10-.4 16.8-.5l19-.5h2.4s.6-.6.6-.4V52.1c-12.6 1-21.3 2-33.8 2.7H112l-.2 16.2Z" fill="#5783AB"/><path d="m196 70.7-4-.3-40-1h-.1l-40 1-4 .3c-6.2.5-6.8 1.1-6.7 7.2 0 .7.2 1.3.3 2h5.7c15-1 28.7-2.1 43.6-3.2l46 3.1c1.8.2 3.7.1 5.6.1.1-.7.3-1.3.3-2 .2-6-.4-6.7-6.7-7.2Z" fill="#274872"/></svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 3.0 KiB After Width: | Height: | Size: 3.0 KiB |
@@ -46,4 +46,4 @@ export default {
|
||||
.max-h-99 {
|
||||
max-height: 99rem;
|
||||
}
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -29,4 +29,4 @@ export default {
|
||||
</script>
|
||||
|
||||
<style>
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -68,4 +68,4 @@ export default {
|
||||
</script>
|
||||
|
||||
<style>
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -404,4 +404,4 @@ export default {
|
||||
},
|
||||
},
|
||||
};
|
||||
</script>
|
||||
</script>
|
||||
|
||||
@@ -132,4 +132,4 @@ export default {
|
||||
.active {
|
||||
@apply text-blue-400 font-semibold;
|
||||
}
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -105,4 +105,4 @@ export default {
|
||||
</script>
|
||||
|
||||
<style>
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -21,4 +21,4 @@ export default {
|
||||
AppButton,
|
||||
},
|
||||
};
|
||||
</script>
|
||||
</script>
|
||||
|
||||
@@ -166,4 +166,4 @@ export default {
|
||||
// background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' version='1.1' xmlns:xlink='http://www.w3.org/1999/xlink' xmlns:svgjs='http://svgjs.com/svgjs' width='1440' height='250' preserveAspectRatio='none' viewBox='0 0 1440 250'%3e%3cg mask='url(%26quot%3b%23SvgjsMask1012%26quot%3b)' fill='none'%3e%3crect width='1440' height='250' x='0' y='0' fill='rgba(31%2c 41%2c 55%2c 1)'%3e%3c/rect%3e%3cpath d='M36 250L286 0L571 0L321 250z' fill='url(%23SvgjsLinearGradient1013)'%3e%3c/path%3e%3cpath d='M264.6 250L514.6 0L678.1 0L428.1 250z' fill='url(%23SvgjsLinearGradient1013)'%3e%3c/path%3e%3cpath d='M507.20000000000005 250L757.2 0L1073.7 0L823.7 250z' fill='url(%23SvgjsLinearGradient1013)'%3e%3c/path%3e%3cpath d='M725.8000000000001 250L975.8000000000001 0L1302.8000000000002 0L1052.8000000000002 250z' fill='url(%23SvgjsLinearGradient1013)'%3e%3c/path%3e%3cpath d='M1439 250L1189 0L995.5 0L1245.5 250z' fill='url(%23SvgjsLinearGradient1014)'%3e%3c/path%3e%3cpath d='M1157.4 250L907.4000000000001 0L817.4000000000001 0L1067.4 250z' fill='url(%23SvgjsLinearGradient1014)'%3e%3c/path%3e%3cpath d='M961.8 250L711.8 0L383.29999999999995 0L633.3 250z' fill='url(%23SvgjsLinearGradient1014)'%3e%3c/path%3e%3cpath d='M688.1999999999999 250L438.19999999999993 0L208.69999999999993 0L458.69999999999993 250z' fill='url(%23SvgjsLinearGradient1014)'%3e%3c/path%3e%3cpath d='M1247.2258701549645 250L1440 57.225870154964355L1440 250z' fill='url(%23SvgjsLinearGradient1013)'%3e%3c/path%3e%3cpath d='M0 250L192.77412984503565 250L 0 57.225870154964355z' fill='url(%23SvgjsLinearGradient1014)'%3e%3c/path%3e%3c/g%3e%3cdefs%3e%3cmask id='SvgjsMask1012'%3e%3crect width='1440' height='250' fill='white'%3e%3c/rect%3e%3c/mask%3e%3clinearGradient x1='0%25' y1='100%25' x2='100%25' y2='0%25' id='SvgjsLinearGradient1013'%3e%3cstop stop-color='rgba(87%2c 131%2c 171%2c 0.14)' offset='0'%3e%3c/stop%3e%3cstop stop-opacity='0' stop-color='rgba(87%2c 131%2c 171%2c 0.14)' offset='0.66'%3e%3c/stop%3e%3c/linearGradient%3e%3clinearGradient x1='100%25' y1='100%25' x2='0%25' y2='0%25' id='SvgjsLinearGradient1014'%3e%3cstop stop-color='rgba(87%2c 131%2c 171%2c 0.14)' offset='0'%3e%3c/stop%3e%3cstop stop-opacity='0' stop-color='rgba(87%2c 131%2c 171%2c 0.14)' offset='0.66'%3e%3c/stop%3e%3c/linearGradient%3e%3c/defs%3e%3c/svg%3e");
|
||||
background-size: cover;
|
||||
}
|
||||
</style>
|
||||
</style>
|
||||
|
||||
@@ -138,4 +138,4 @@ export default {
|
||||
}
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</style>
|
||||
|
||||
2
docs/static/README.md
vendored
@@ -1,3 +1,3 @@
|
||||
Add static files here. Files in this directory will be copied directly to `dist` folder during build. For example, /static/robots.txt will be located at https://yoursite.com/robots.txt.
|
||||
|
||||
This file should be deleted.
|
||||
This file should be deleted.
|
||||
|
||||
2
docs/template/reference-cr.tmpl
vendored
@@ -108,4 +108,4 @@ Resource Types:
|
||||
|
||||
{{- end}}{{/* range .Types */}}
|
||||
{{- end}}{{/* range .Kinds */}}
|
||||
{{- end}}{{/* range .Groups */}}
|
||||
{{- end}}{{/* range .Groups */}}
|
||||
|
||||
@@ -46,13 +46,13 @@ echo `date`": INFO: Wait then Kind cluster be ready. Wait only 30 seconds"
|
||||
counter=0
|
||||
while true
|
||||
do
|
||||
if [ $counter == 30 ]; then
|
||||
if [ $counter == 30 ]; then
|
||||
echo `date`": ERROR: Kind cluster not ready for too long"
|
||||
error_action
|
||||
fi
|
||||
|
||||
kubectl get nodes | grep " Ready " &>/dev/null
|
||||
if [ $? == 0 ]; then
|
||||
if [ $? == 0 ]; then
|
||||
break
|
||||
fi
|
||||
|
||||
@@ -75,13 +75,13 @@ echo `date`": INFO: Wait then capsule POD be ready. Wait only 30 seconds"
|
||||
counter=0
|
||||
while true
|
||||
do
|
||||
if [ $counter == 30 ]; then
|
||||
if [ $counter == 30 ]; then
|
||||
echo `date`": ERROR: Kind cluster not ready for too long"
|
||||
error_action
|
||||
fi
|
||||
|
||||
kubectl get pod -n capsule-system | grep " Running " &>/dev/null
|
||||
if [ $? == 0 ]; then
|
||||
if [ $? == 0 ]; then
|
||||
break
|
||||
fi
|
||||
|
||||
@@ -142,4 +142,4 @@ fi
|
||||
|
||||
echo `date`": INFO: All ok"
|
||||
|
||||
cleanup_action
|
||||
cleanup_action
|
||||
|
||||
@@ -21,12 +21,12 @@ TENANTS=""
|
||||
# Outputs:
|
||||
# print usage with examples.
|
||||
usage () {
|
||||
printf "Usage: $0 [flags] commands\n"
|
||||
printf "Flags:\n"
|
||||
printf "\t-c, --kubeconfig /path/to/config\tPath to the kubeconfig file to use for CLI requests.\n"
|
||||
printf "\t-t, --tenant \"gas oil\"\t\tSpecify one or more tenants to be restored.\n"
|
||||
printf "Commands:\n"
|
||||
printf "\trestore\t\t\tPerform the restore on the cluster, patching the right object fields.\n"
|
||||
printf "Usage: $0 [flags] commands\n"
|
||||
printf "Flags:\n"
|
||||
printf "\t-c, --kubeconfig /path/to/config\tPath to the kubeconfig file to use for CLI requests.\n"
|
||||
printf "\t-t, --tenant \"gas oil\"\t\tSpecify one or more tenants to be restored.\n"
|
||||
printf "Commands:\n"
|
||||
printf "\trestore\t\t\tPerform the restore on the cluster, patching the right object fields.\n"
|
||||
printf "\n"
|
||||
printf "E.g. [restore]:\t$0 -c /path/to/kubeconfig restore\n"
|
||||
printf "E.g. [restore]:\t$0 -t \"oil\" restore\n"
|
||||
@@ -197,4 +197,3 @@ case "${@: -1}" in
|
||||
*)
|
||||
break
|
||||
esac
|
||||
|
||||
|
||||