Mirror of https://github.com/rancher/k3k.git
Synced 2026-03-02 17:50:36 +00:00

Compare commits: chart-0.3. ... v0.3.3-rc1 (30 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 87032c8195 | |
| | 78e0c307b8 | |
| | 5758b880a5 | |
| | 2655d792cc | |
| | 93e1c85468 | |
| | 8fbe4b93e8 | |
| | 2515d19187 | |
| | 2b1448ffb8 | |
| | fdb5bb9c19 | |
| | 45fdbf9363 | |
| | 3590b48d91 | |
| | cca3d0c309 | |
| | f228c4536c | |
| | 37fe4493e7 | |
| | 6a22f6f704 | |
| | 96a4341dfb | |
| | 510ab4bb8a | |
| | 9d96ee1e9c | |
| | 7c424821ca | |
| | a2f5fd7592 | |
| | c8df86b83b | |
| | d41d2b8c31 | |
| | 7cb2399b89 | |
| | 90568f24b1 | |
| | 0843a9e313 | |
| | b58578788c | |
| | c4cc1e69cd | |
| | bd947c0fcb | |
| | b0b61f8d8e | |
| | 3281d54c6c | |
.github/workflows/build.yml (vendored, 2 changes)

@@ -33,5 +33,5 @@ jobs:
           args: --clean --snapshot
         env:
           REPO: ${{ github.repository }}
-          REGISTRY:
+          REGISTRY: ""
.github/workflows/test.yaml (vendored, 2 changes)

@@ -24,7 +24,7 @@ jobs:
         uses: golangci/golangci-lint-action@v6
         with:
           args: --timeout=5m
-          version: v1.60
+          version: v1.64

   tests:
     runs-on: ubuntu-latest
Makefile (33 changes)

@@ -4,15 +4,13 @@ VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")

 ## Dependencies

-GOLANGCI_LINT_VERSION := v1.63.4
-CONTROLLER_TOOLS_VERSION ?= v0.14.0
+GOLANGCI_LINT_VERSION := v1.64.8
 GINKGO_VERSION ?= v2.21.0
-ENVTEST_VERSION ?= latest
+ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
 ENVTEST_K8S_VERSION := 1.31.0
 CRD_REF_DOCS_VER ?= v0.1.0

 GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
-CONTROLLER_GEN ?= go run sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
 GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
 CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

@@ -22,7 +20,7 @@ export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin

 .PHONY: all
-all: version build-crds build package ## Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
+all: version generate build package ## Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'

 .PHONY: version
 version: ## Print the current version

@@ -51,7 +49,6 @@ push-%:
 	docker push $(REPO)/$*:latest
-	docker push $(REPO)/$*:dev

 .PHONY: test
 test: ## Run all the tests
 	$(GINKGO) -v -r --label-filter=$(label-filter)

@@ -68,17 +65,16 @@ test-controller: ## Run the controller tests (pkg/controller)
 test-e2e: ## Run the e2e tests
 	$(GINKGO) -v -r tests

-.PHONY: build-crds
-build-crds: ## Build the CRDs specs
-	@# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
-	@# allowDangerousTypes is needed for struct that use floats
-	$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
-		paths=./pkg/apis/... \
-		output:crd:dir=./charts/k3k/crds
+.PHONY: generate
+generate: ## Generate the CRDs specs
+	go generate ./...

 .PHONY: docs
 docs: ## Build the CRDs and CLI docs
-	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml --renderer=markdown --source-path=./pkg/apis/k3k.io/v1alpha1 --output-path=./docs/crds/crd-docs.md
+	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
+		--renderer=markdown \
+		--source-path=./pkg/apis/k3k.io/v1alpha1 \
+		--output-path=./docs/crds/crd-docs.md
 	@go run ./docs/cli/genclidoc.go

@@ -86,12 +82,11 @@ lint: ## Find any linting issues in the project
 	$(GOLANGCI_LINT) run --timeout=5m

 .PHONY: validate
-validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
+validate: generate docs ## Validate the project checking for any dependency or doc mismatch
 	$(GINKGO) unfocus
 	go mod tidy
-	git --no-pager diff go.mod go.sum
-	test -z "$(shell git status --porcelain)"
+	git status --porcelain
+	git --no-pager diff --exit-code

 .PHONY: install
 install: ## Install K3k with Helm on the targeted Kubernetes cluster

@@ -104,4 +99,4 @@ install: ## Install K3k with Helm on the targeted Kubernetes cluster

 .PHONY: help
 help: ## Show this help.
-	@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
+	@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
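With `build-crds` gone, CRD generation moves behind `go generate ./...`, which implies a `//go:generate` directive somewhere next to the API types. A minimal sketch of what such a directive could look like; the file location, relative output path, and flags here are assumptions that mirror the old Makefile invocation, not the actual k3k sources:

```go
// Hypothetical doc.go next to the API types; the real directive in the
// repo may live elsewhere and use different flags.
//
//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen crd:generateEmbeddedObjectMeta=true paths=./... output:crd:dir=../../../../charts/k3k/crds

// Package v1alpha1 contains the k3k.io/v1alpha1 API types.
package v1alpha1
```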
@@ -56,10 +56,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
 2. Install the K3k controller:

    ```bash
-   helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
+   helm install --namespace k3k-system --create-namespace k3k k3k/k3k
    ```

-**NOTE:** K3k is currently under development, so the chart is marked as a development chart. This means you need to add the `--devel` flag to install it. For production use, keep an eye on releases for stable versions. We recommend using the latest released version when possible.
+**NOTE:** K3k is currently under development. We recommend using the latest released version when possible.


 ### Install the `k3kcli`

@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur
 For example, you can download the Linux amd64 version with:

 ```
-wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-linux-amd64 && \
+wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.2/k3kcli-linux-amd64 && \
 chmod +x k3kcli && \
 sudo mv k3kcli /usr/local/bin
 ```

@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-l
 You should now be able to run:
 ```bash
 -> % k3kcli --version
-k3kcli Version: v0.3.0
+k3kcli Version: v0.3.2
 ```
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 0.3.1-r2
-appVersion: v0.3.1
+version: 0.3.2
+appVersion: v0.3.2
@@ -14,7 +14,11 @@ spec:
     singular: cluster
   scope: Namespaced
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .spec.mode
+      name: Mode
+      type: string
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-
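The added `additionalPrinterColumns` entry is what makes `kubectl get clusters` show a MODE column. With controller-gen v0.14.0 (per the annotation in the generated file), such a column normally originates from a kubebuilder marker on the Go type; a sketch under that assumption, with the surrounding type abridged rather than copied from the k3k sources:

```go
package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Mode",type=string,JSONPath=`.spec.mode`

// Cluster is the k3k virtual cluster resource (abridged sketch; the real
// type carries the full spec and status shown in the CRD schema).
type Cluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ClusterSpec   `json:"spec,omitempty"`
	Status ClusterStatus `json:"status,omitempty"`
}
```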
@@ -65,6 +69,119 @@ spec:
               items:
                 type: string
               type: array
+            agentEnvs:
+              description: AgentEnvs specifies list of environment variables to
+                set in the agent pod.
+              items:
+                description: EnvVar represents an environment variable present in
+                  a Container.
+                … (standard Kubernetes EnvVar schema: name, value, and valueFrom
+                   with configMapKeyRef, fieldRef, resourceFieldRef and
+                   secretKeyRef selectors)
+                required:
+                - name
+                type: object
+              type: array
             agents:
               default: 0
               description: |-

@@ -94,29 +211,6 @@ spec:
               x-kubernetes-validations:
               - message: clusterDNS is immutable
                 rule: self == oldSelf
-            clusterLimit:
-              description: Limit defines resource limits for server/agent nodes.
-              properties:
-                serverLimit:
-                  description: ServerLimit specifies resource limits for server
-                    nodes.
-                  … (resource-quantity map)
-                workerLimit:
-                  description: WorkerLimit specifies resource limits for agent nodes.
-                  … (resource-quantity map)
-              type: object
             expose:
               description: |-
                 Expose specifies options for exposing the API server.

@@ -140,6 +234,21 @@ spec:
                 loadbalancer:
                   description: LoadBalancer specifies options for exposing the API
                     server through a LoadBalancer service.
+                  properties:
+                    etcdPort:
+                      description: |-
+                        ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.
+                        If not specified, the default etcd 2379 port will be allocated.
+                        If 0 or negative, the port will not be exposed.
+                      format: int32
+                      type: integer
+                    serverPort:
+                      description: |-
+                        ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.
+                        If not specified, the default https 443 port will be allocated.
+                        If 0 or negative, the port will not be exposed.
+                      format: int32
+                      type: integer
+                  type: object
                 nodePort:
                   description: NodePort specifies options for exposing the API server

@@ -148,19 +257,15 @@ spec:
                     etcdPort:
                       description: |-
                         ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
-                        If not specified, a port will be allocated (default: 30000-32767).
+                        If not specified, a random port between 30000-32767 will be allocated.
+                        If out of range, the port will not be exposed.
                       format: int32
                       type: integer
                     serverPort:
                       description: |-
-                        ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
-                        If not specified, a port will be allocated (default: 30000-32767).
-                      format: int32
-                      type: integer
-                    servicePort:
-                      description: |-
-                        ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
-                        If not specified, a port will be allocated (default: 30000-32767).
+                        ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.
+                        If not specified, a random port between 30000-32767 will be allocated.
+                        If out of range, the port will not be exposed.
                       format: int32
                       type: integer
                   type: object

@@ -225,6 +330,128 @@ spec:
               items:
                 type: string
               type: array
+            serverEnvs:
+              description: ServerEnvs specifies list of environment variables to
+                set in the server pod.
+              items:
+                description: EnvVar represents an environment variable present in
+                  a Container.
+                … (same standard Kubernetes EnvVar schema as agentEnvs above)
+                required:
+                - name
+                type: object
+              type: array
+            serverLimit:
+              additionalProperties:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              description: ServerLimit specifies resource limits for server nodes.
+              type: object
             servers:
               default: 1
               description: |-

@@ -271,6 +498,15 @@ spec:
                 It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
                 If not specified, the Kubernetes version of the host node will be used.
               type: string
+            workerLimit:
+              additionalProperties:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              description: WorkerLimit specifies resource limits for agent nodes.
+              type: object
           type: object
         status:
           description: Status reflects the observed state of the Cluster.
@@ -1,212 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.14.0
-  name: clustersets.k3k.io
-spec:
-  group: k3k.io
-  names:
-    kind: ClusterSet
-    listKind: ClusterSetList
-    plural: clustersets
-    singular: clusterset
-  scope: Namespaced
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
-          It allows defining common configurations and constraints for the clusters within the set.
-        properties:
-          spec:
-            default: {}
-            description: Spec defines the desired state of the ClusterSet.
-            properties:
-              allowedNodeTypes:
-                default:
-                - shared
-                description: AllowedNodeTypes specifies the allowed cluster provisioning
-                  modes. Defaults to [shared].
-                … (enum shared/virtual, minItems 1, immutable)
-              defaultLimits:
-                description: DefaultLimits specifies the default resource limits for
-                  servers/agents when a cluster in the set doesn't provide any.
-                … (serverLimit and workerLimit resource-quantity maps)
-              defaultNodeSelector:
-                description: DefaultNodeSelector specifies the node selector that
-                  applies to all clusters (server + agent) in the set.
-              defaultPriorityClass:
-                description: DefaultPriorityClass specifies the priorityClassName
-                  applied to all pods of all clusters in the set.
-              disableNetworkPolicy:
-                description: DisableNetworkPolicy indicates whether to disable the
-                  creation of a default network policy for cluster isolation.
-              maxLimits:
-                description: MaxLimits specifies the maximum resource limits that
-                  apply to all clusters (server + agent) in the set.
-              podSecurityAdmissionLevel:
-                description: PodSecurityAdmissionLevel specifies the pod security
-                  admission level applied to the pods in the namespace.
-                … (enum privileged/baseline/restricted)
-          status:
-            description: Status reflects the observed state of the ClusterSet.
-            … (conditions using the standard metav1.Condition schema, plus
-               lastUpdateTime, observedGeneration and summary)
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
charts/k3k/crds/k3k.io_virtualclusterpolicies.yaml (new file, 319 lines)
@@ -0,0 +1,319 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: virtualclusterpolicies.k3k.io
+spec:
+  group: k3k.io
+  names:
+    kind: VirtualClusterPolicy
+    listKind: VirtualClusterPolicyList
+    plural: virtualclusterpolicies
+    shortNames:
+    - vcp
+    singular: virtualclusterpolicy
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .spec.allowedMode
+      name: Mode
+      type: string
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          VirtualClusterPolicy allows defining common configurations and constraints
+          for clusters within a clusterpolicy.
+        properties:
+          spec:
+            default: {}
+            description: Spec defines the desired state of the VirtualClusterPolicy.
+            properties:
+              allowedMode:
+                default: shared
+                description: AllowedMode specifies the allowed cluster provisioning
+                  mode. Defaults to "shared".
+                enum:
+                - shared
+                - virtual
+                type: string
+                x-kubernetes-validations:
+                - message: mode is immutable
+                  rule: self == oldSelf
+              defaultNodeSelector:
+                description: DefaultNodeSelector specifies the node selector that
+                  applies to all clusters (server + agent) in the target Namespace.
+              defaultPriorityClass:
+                description: DefaultPriorityClass specifies the priorityClassName
+                  applied to all pods of all clusters in the target Namespace.
+              disableNetworkPolicy:
+                description: DisableNetworkPolicy indicates whether to disable the
+                  creation of a default network policy for cluster isolation.
+              limit:
+                description: |-
+                  Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
+                  to set defaults and constraints (min/max)
+                … (standard Kubernetes LimitRangeSpec: limits with default,
+                   defaultRequest, max, maxLimitRequestRatio, min and type)
+              podSecurityAdmissionLevel:
+                description: PodSecurityAdmissionLevel specifies the pod security
+                  admission level applied to the pods in the namespace.
+                enum:
+                - privileged
+                - baseline
+                - restricted
+                type: string
+              quota:
+                description: Quota specifies the resource limits for clusters within
+                  a clusterpolicy.
+                … (standard Kubernetes ResourceQuotaSpec: hard, scopeSelector,
+                   scopes)
+          status:
+            description: Status reflects the observed state of the VirtualClusterPolicy.
+            … (conditions using the standard metav1.Condition schema, plus
+               lastUpdateTime, observedGeneration and summary)
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
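The generated schema above maps back to Go API types. A condensed sketch of what the spec could look like, inferred purely from the CRD fields; the type and field names are assumptions reconstructed from the YAML, while the core/v1 types are standard Kubernetes API types:

```go
package v1alpha1

import corev1 "k8s.io/api/core/v1"

// ClusterMode and PodSecurityAdmissionLevel are assumed string enums,
// matching the enum values in the generated schema.
type (
	ClusterMode               string
	PodSecurityAdmissionLevel string
)

// VirtualClusterPolicySpec mirrors the CRD schema shown above; it may not
// match the k3k sources exactly.
type VirtualClusterPolicySpec struct {
	// AllowedMode is the allowed provisioning mode, "shared" or "virtual".
	AllowedMode ClusterMode `json:"allowedMode,omitempty"`

	// DefaultNodeSelector applies to all clusters in the target Namespace.
	DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`

	// DefaultPriorityClass is applied to all pods of all clusters in the target Namespace.
	DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`

	// DisableNetworkPolicy disables the default isolation network policy.
	DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`

	// Limit is a LimitRange applied to all pods governed by the policy.
	Limit *corev1.LimitRangeSpec `json:"limit,omitempty"`

	// PodSecurityAdmissionLevel is privileged, baseline or restricted.
	PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`

	// Quota is a ResourceQuota spec applied to clusters governed by the policy.
	Quota *corev1.ResourceQuotaSpec `json:"quota,omitempty"`
}
```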
@@ -26,6 +26,10 @@ spec:
           value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
         - name: SHARED_AGENT_PULL_POLICY
           value: {{ .Values.sharedAgent.image.pullPolicy }}
+        - name: K3S_IMAGE
+          value: {{ .Values.k3sServer.image.repository }}
+        - name: K3S_IMAGE_PULL_POLICY
+          value: {{ .Values.k3sServer.image.pullPolicy }}
         ports:
         - containerPort: 8080
           name: https
@@ -10,7 +10,7 @@ nameOverride: ""
 fullnameOverride: ""

 host:
-  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy for clustersets, if not set
+  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
   # the controller will collect the PodCIDRs of all the nodes on the system.
   clusterCIDR: ""
@@ -27,3 +27,8 @@ sharedAgent:
     repository: "rancher/k3k-kubelet"
     tag: ""
     pullPolicy: ""
+# image registry configuration related to the k3s server
+k3sServer:
+  image:
+    repository: "rancher/k3s"
+    pullPolicy: ""
@@ -4,13 +4,14 @@ import (
 	"github.com/urfave/cli/v2"
 )

-func NewClusterCommand() *cli.Command {
+func NewClusterCmd(appCtx *AppContext) *cli.Command {
 	return &cli.Command{
 		Name:  "cluster",
 		Usage: "cluster command",
 		Subcommands: []*cli.Command{
-			NewClusterCreateCmd(),
-			NewClusterDeleteCmd(),
+			NewClusterCreateCmd(appCtx),
+			NewClusterDeleteCmd(appCtx),
+			NewClusterListCmd(appCtx),
 		},
 	}
 }
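Every subcommand now receives an `*AppContext` instead of building its own REST config and client. The struct itself is not part of this diff; a sketch of its likely shape, reconstructed from the call sites (`appCtx.Client`, `appCtx.RestConfig`, `appCtx.namespace`, `appCtx.Namespace(name)`), with the default-namespace rule being an assumption:

```go
package cmds

import (
	"k8s.io/client-go/rest"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// AppContext is a hypothetical reconstruction from call sites in this diff;
// the real definition lives elsewhere in the k3k sources.
type AppContext struct {
	RestConfig *rest.Config
	Client     ctrlclient.Client // controller-runtime client built once at startup

	namespace string // value of the shared --namespace flag
}

// Namespace returns the explicit --namespace value if set, otherwise a
// per-cluster default (the "k3k-" prefix here is a guess).
func (ctx *AppContext) Namespace(clusterName string) string {
	if ctx.namespace != "" {
		return ctx.namespace
	}
	return "k3k-" + clusterName
}
```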
@@ -4,8 +4,6 @@ import (
 	"context"
 	"errors"
 	"net/url"
-	"os"
-	"path/filepath"
 	"strings"
 	"time"

@@ -18,11 +16,9 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/utils/ptr"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )

 type CreateConfig struct {
@@ -33,14 +29,17 @@ type CreateConfig struct {
 	agents               int
 	serverArgs           cli.StringSlice
 	agentArgs            cli.StringSlice
+	serverEnvs           cli.StringSlice
+	agentEnvs            cli.StringSlice
 	persistenceType      string
 	storageClassName     string
 	version              string
 	mode                 string
 	kubeconfigServerHost string
+	policy               string
 }

-func NewClusterCreateCmd() *cli.Command {
+func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
 	createConfig := &CreateConfig{}
 	createFlags := NewCreateFlags(createConfig)
@@ -48,15 +47,16 @@ func NewClusterCreateCmd() *cli.Command {
 		Name:            "create",
 		Usage:           "Create new cluster",
 		UsageText:       "k3kcli cluster create [command options] NAME",
-		Action:          createAction(createConfig),
-		Flags:           append(CommonFlags, createFlags...),
+		Action:          createAction(appCtx, createConfig),
+		Flags:           WithCommonFlags(appCtx, createFlags...),
 		HideHelpCommand: true,
 	}
 }

-func createAction(config *CreateConfig) cli.ActionFunc {
+func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
 	return func(clx *cli.Context) error {
 		ctx := context.Background()
+		client := appCtx.Client

 		if clx.NArg() != 1 {
 			return cli.ShowSubcommandHelp(clx)
@@ -67,15 +67,13 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 			return errors.New("invalid cluster name")
 		}

-		restConfig, err := loadRESTConfig()
-		if err != nil {
-			return err
+		if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
+			return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
 		}

-		ctrlClient, err := client.New(restConfig, client.Options{
-			Scheme: Scheme,
-		})
-		if err != nil {
+		namespace := appCtx.Namespace(name)
+
+		if err := createNamespace(ctx, client, namespace, config.policy); err != nil {
 			return err
 		}
@@ -86,25 +84,25 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 		}

 		if config.token != "" {
-			logrus.Infof("Creating cluster token secret")
+			logrus.Info("Creating cluster token secret")

-			obj := k3kcluster.TokenSecretObj(config.token, name, Namespace())
+			obj := k3kcluster.TokenSecretObj(config.token, name, namespace)

-			if err := ctrlClient.Create(ctx, &obj); err != nil {
+			if err := client.Create(ctx, &obj); err != nil {
 				return err
 			}
 		}

-		logrus.Infof("Creating a new cluster [%s]", name)
+		logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)

-		cluster := newCluster(name, Namespace(), config)
+		cluster := newCluster(name, namespace, config)

 		cluster.Spec.Expose = &v1alpha1.ExposeConfig{
 			NodePort: &v1alpha1.NodePortConfig{},
 		}

 		// add Host IP address as an extra TLS-SAN to expose the k3k cluster
-		url, err := url.Parse(restConfig.Host)
+		url, err := url.Parse(appCtx.RestConfig.Host)
 		if err != nil {
 			return err
 		}
@@ -116,7 +114,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {

 		cluster.Spec.TLSSANs = []string{host[0]}

-		if err := ctrlClient.Create(ctx, cluster); err != nil {
+		if err := client.Create(ctx, cluster); err != nil {
 			if apierrors.IsAlreadyExists(err) {
 				logrus.Infof("Cluster [%s] already exists", name)
 			} else {
@@ -140,29 +138,13 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 		var kubeconfig *clientcmdapi.Config

 		if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
-			kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
+			kubeconfig, err = cfg.Extract(ctx, client, cluster, host[0])
 			return err
 		}); err != nil {
 			return err
 		}

-		pwd, err := os.Getwd()
-		if err != nil {
-			return err
-		}
-
-		logrus.Infof(`You can start using the cluster with:
-
-	export KUBECONFIG=%s
-	kubectl cluster-info
-	`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
-
-		kubeconfigData, err := clientcmd.Write(*kubeconfig)
-		if err != nil {
-			return err
-		}
-
-		return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
+		return writeKubeconfigFile(cluster, kubeconfig)
 	}
 }
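The inline `os.Getwd`/`clientcmd.Write`/`os.WriteFile` sequence is folded into a shared `writeKubeconfigFile` helper, also used by `kubeconfig generate` later in this diff. A sketch of the helper, assuming it simply keeps the behavior of the removed inline code:

```go
package cmds

import (
	"os"
	"path/filepath"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// writeKubeconfigFile is a sketch; it assumes the shared helper preserves
// the inline logic it replaced above.
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
	pwd, err := os.Getwd()
	if err != nil {
		return err
	}

	logrus.Infof(`You can start using the cluster with:

	export KUBECONFIG=%s
	kubectl cluster-info
	`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))

	kubeconfigData, err := clientcmd.Write(*kubeconfig)
	if err != nil {
		return err
	}

	return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
}
```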
@@ -183,6 +165,8 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
 			ServiceCIDR: config.serviceCIDR,
 			ServerArgs:  config.serverArgs.Value(),
 			AgentArgs:   config.agentArgs.Value(),
+			ServerEnvs:  env(config.serverEnvs.Value()),
+			AgentEnvs:   env(config.agentEnvs.Value()),
 			Version:     config.version,
 			Mode:        v1alpha1.ClusterMode(config.mode),
 			Persistence: v1alpha1.PersistenceConfig{

@@ -204,3 +188,21 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster

 	return cluster
 }
+
+func env(envSlice []string) []v1.EnvVar {
+	var envVars []v1.EnvVar
+
+	for _, env := range envSlice {
+		keyValue := strings.Split(env, "=")
+		if len(keyValue) != 2 {
+			logrus.Fatalf("incorrect value for environment variable %s", env)
+		}
+
+		envVars = append(envVars, v1.EnvVar{
+			Name:  keyValue[0],
+			Value: keyValue[1],
+		})
+	}
+
+	return envVars
+}
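The new `env` helper turns `--server-envs`/`--agent-envs` values of the form `KEY=VALUE` into core `EnvVar`s. Note that `strings.Split` rejects values that themselves contain `=`, since the result then has more than two parts. A small runnable illustration of the same parsing rule (standalone sketch, not the k3k code itself):

```go
package main

import (
	"fmt"
	"strings"
)

// parseEnvs applies the same KEY=VALUE rule as the env() helper above,
// returning an error instead of calling logrus.Fatalf.
func parseEnvs(pairs []string) ([][2]string, error) {
	var out [][2]string
	for _, p := range pairs {
		kv := strings.Split(p, "=")
		if len(kv) != 2 {
			return nil, fmt.Errorf("incorrect value for environment variable %s", p)
		}
		out = append(out, [2]string{kv[0], kv[1]})
	}
	return out, nil
}

func main() {
	ok, _ := parseEnvs([]string{"FOO=bar", "BAZ=qux"})
	fmt.Println(ok) // [[FOO bar] [BAZ qux]]

	// A value containing '=' itself is rejected by the len check;
	// strings.SplitN(p, "=", 2) would accept it instead.
	_, err := parseEnvs([]string{"JAVA_OPTS=-Dx=y"})
	fmt.Println(err) // incorrect value for environment variable JAVA_OPTS=-Dx=y
}
```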
@@ -70,6 +70,16 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
 			Usage:       "agents extra arguments",
 			Destination: &config.agentArgs,
 		},
+		&cli.StringSliceFlag{
+			Name:        "server-envs",
+			Usage:       "servers extra Envs",
+			Destination: &config.serverEnvs,
+		},
+		&cli.StringSliceFlag{
+			Name:        "agent-envs",
+			Usage:       "agents extra Envs",
+			Destination: &config.agentEnvs,
+		},
 		&cli.StringFlag{
 			Name:  "version",
 			Usage: "k3s version",

@@ -94,5 +104,10 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
 			Usage:       "override the kubeconfig server host",
 			Destination: &config.kubeconfigServerHost,
 		},
+		&cli.StringFlag{
+			Name:        "policy",
+			Usage:       "The policy to create the cluster in",
+			Destination: &config.policy,
+		},
 	}
 }
@@ -6,55 +6,112 @@ import (

 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
 	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
+	"github.com/rancher/k3k/pkg/controller/cluster/agent"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli/v2"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client"
+	"k8s.io/apimachinery/pkg/types"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )

-func NewClusterDeleteCmd() *cli.Command {
+var keepData bool
+
+func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
 	return &cli.Command{
-		Name:            "delete",
-		Usage:           "Delete an existing cluster",
-		UsageText:       "k3kcli cluster delete [command options] NAME",
-		Action:          delete,
-		Flags:           CommonFlags,
+		Name:      "delete",
+		Usage:     "Delete an existing cluster",
+		UsageText: "k3kcli cluster delete [command options] NAME",
+		Action:    delete(appCtx),
+		Flags: WithCommonFlags(appCtx, &cli.BoolFlag{
+			Name:        "keep-data",
+			Usage:       "keeps persistence volumes created for the cluster after deletion",
+			Destination: &keepData,
+		}),
 		HideHelpCommand: true,
 	}
 }

-func delete(clx *cli.Context) error {
-	ctx := context.Background()
+func delete(appCtx *AppContext) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client

-	if clx.NArg() != 1 {
-		return cli.ShowSubcommandHelp(clx)
-	}
+		if clx.NArg() != 1 {
+			return cli.ShowSubcommandHelp(clx)
+		}

-	name := clx.Args().First()
-	if name == k3kcluster.ClusterInvalidName {
-		return errors.New("invalid cluster name")
-	}
+		name := clx.Args().First()
+		if name == k3kcluster.ClusterInvalidName {
+			return errors.New("invalid cluster name")
+		}

-	restConfig, err := loadRESTConfig()
-	if err != nil {
-		return err
-	}
+		namespace := appCtx.Namespace(name)

-	ctrlClient, err := client.New(restConfig, client.Options{
-		Scheme: Scheme,
-	})
-	if err != nil {
-		return err
-	}
+		logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)

-	logrus.Infof("deleting [%s] cluster", name)
-
-	cluster := v1alpha1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: Namespace(),
-		},
-	}
-
-	return ctrlClient.Delete(ctx, &cluster)
+		cluster := v1alpha1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		}
+
+		// keep bootstrap secrets and tokens if --keep-data flag is passed
+		if keepData {
+			// skip removing tokenSecret
+			if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
+				return err
+			}
+
+			// skip removing webhook secret
+			if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
+				return err
+			}
+		} else {
+			matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
+			listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
+			matchingLabels.ApplyToList(&listOpts)
+			deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
+
+			if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
+				return ctrlclient.IgnoreNotFound(err)
+			}
+		}
+
+		if err := client.Delete(ctx, &cluster); err != nil {
+			return ctrlclient.IgnoreNotFound(err)
+		}
+
+		return nil
+	}
 }
+
+func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
+	var secret v1.Secret
+
+	key := types.NamespacedName{
+		Name:      name,
+		Namespace: cluster.Namespace,
+	}
+
+	if err := cl.Get(ctx, key, &secret); err != nil {
+		if apierrors.IsNotFound(err) {
+			logrus.Warnf("%s secret is not found", name)
+			return nil
+		}
+
+		return err
+	}
+
+	if controllerutil.HasControllerReference(&secret) {
+		if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
+			return err
+		}
+
+		return cl.Update(ctx, &secret)
+	}
+
+	return nil
+}
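`RemoveOwnerReferenceFromSecret` works because Kubernetes garbage collection cascades only to objects whose `ownerReferences` point at the deleted parent; dropping the controller reference detaches the secret before the Cluster object is deleted. A small runnable illustration of the check the helper relies on (standalone sketch, not k3k code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	// A secret that would be garbage-collected with its Cluster, because it
	// carries a controller ownerReference pointing at the Cluster.
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "mycluster-token",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "k3k.io/v1alpha1",
				Kind:       "Cluster",
				Name:       "mycluster",
				UID:        "1234",
				Controller: ptr.To(true),
			}},
		},
	}

	fmt.Println(controllerutil.HasControllerReference(secret)) // true

	// Clearing the references detaches the secret from garbage collection.
	secret.OwnerReferences = nil
	fmt.Println(controllerutil.HasControllerReference(secret)) // false
}
```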
cli/cmds/cluster_list.go (new file, 51 lines)
@@ -0,0 +1,51 @@
+package cmds
+
+import (
+	"context"
+
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/urfave/cli/v2"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/cli-runtime/pkg/printers"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func NewClusterListCmd(appCtx *AppContext) *cli.Command {
+	return &cli.Command{
+		Name:            "list",
+		Usage:           "List all the existing cluster",
+		UsageText:       "k3kcli cluster list [command options]",
+		Action:          list(appCtx),
+		Flags:           WithCommonFlags(appCtx),
+		HideHelpCommand: true,
+	}
+}
+
+func list(appCtx *AppContext) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client
+
+		if clx.NArg() > 0 {
+			return cli.ShowSubcommandHelp(clx)
+		}
+
+		var clusters v1alpha1.ClusterList
+		if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
+			return err
+		}
+
+		crd := &apiextensionsv1.CustomResourceDefinition{}
+		if err := client.Get(ctx, types.NamespacedName{Name: "clusters.k3k.io"}, crd); err != nil {
+			return err
+		}
+
+		items := toPointerSlice(clusters.Items)
+		table := createTable(crd, items)
+
+		printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true})
+
+		return printer.PrintObj(table, clx.App.Writer)
+	}
+}
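`toPointerSlice` and `createTable` are helpers not shown in this diff; `createTable` presumably converts the CRD's `additionalPrinterColumns` plus the cluster items into a `metav1.Table` for the cli-runtime printer, so `k3kcli cluster list` renders the same columns as `kubectl get clusters`. A hypothetical reconstruction of the simpler helper, which may differ from the k3k sources:

```go
package cmds

// toPointerSlice converts a slice of values into a slice of pointers to the
// original elements (Go 1.18+ generics); sketch matching the call site above.
func toPointerSlice[T any](items []T) []*T {
	out := make([]*T, 0, len(items))
	for i := range items {
		out = append(out, &items[i])
	}
	return out
}
```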
@@ -20,7 +20,6 @@ import (
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var (
@@ -73,86 +72,88 @@ var (
	}
)

var subcommands = []*cli.Command{
	{
func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:  "kubeconfig",
		Usage: "Manage kubeconfig for clusters",
		Subcommands: []*cli.Command{
			NewKubeconfigGenerateCmd(appCtx),
		},
	}
}

func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:            "generate",
		Usage:           "Generate kubeconfig for clusters",
		SkipFlagParsing: false,
		Action:          generate,
		Flags:           append(CommonFlags, generateKubeconfigFlags...),
	},
}

func NewKubeconfigCommand() *cli.Command {
	return &cli.Command{
		Name:        "kubeconfig",
		Usage:       "Manage kubeconfig for clusters",
		Subcommands: subcommands,
		Action:          generate(appCtx),
		Flags:           WithCommonFlags(appCtx, generateKubeconfigFlags...),
	}
}

func generate(clx *cli.Context) error {
	restConfig, err := loadRESTConfig()
	if err != nil {
		return err
	}
func generate(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

	ctrlClient, err := client.New(restConfig, client.Options{
		Scheme: Scheme,
	})
	if err != nil {
		return err
	}
		clusterKey := types.NamespacedName{
			Name:      name,
			Namespace: appCtx.Namespace(name),
		}

	clusterKey := types.NamespacedName{
		Name:      name,
		Namespace: Namespace(),
	}
		var cluster v1alpha1.Cluster

	var cluster v1alpha1.Cluster

	ctx := context.Background()
	if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
		return err
	}

	url, err := url.Parse(restConfig.Host)
	if err != nil {
		return err
	}

	host := strings.Split(url.Host, ":")
	if kubeconfigServerHost != "" {
		host = []string{kubeconfigServerHost}

		if err := altNames.Set(kubeconfigServerHost); err != nil {
		if err := client.Get(ctx, clusterKey, &cluster); err != nil {
			return err
		}

		url, err := url.Parse(appCtx.RestConfig.Host)
		if err != nil {
			return err
		}

		host := strings.Split(url.Host, ":")
		if kubeconfigServerHost != "" {
			host = []string{kubeconfigServerHost}

			if err := altNames.Set(kubeconfigServerHost); err != nil {
				return err
			}
		}

		certAltNames := certs.AddSANs(altNames.Value())

		orgs := org.Value()
		if orgs == nil {
			orgs = []string{user.SystemPrivilegedGroup}
		}

		cfg := kubeconfig.KubeConfig{
			CN:         cn,
			ORG:        orgs,
			ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
			AltNames:   certAltNames,
		}

		logrus.Infof("waiting for cluster to be available..")

		var kubeconfig *clientcmdapi.Config

		if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
			kubeconfig, err = cfg.Extract(ctx, client, &cluster, host[0])
			return err
		}); err != nil {
			return err
		}

		return writeKubeconfigFile(&cluster, kubeconfig)
	}
}

	certAltNames := certs.AddSANs(altNames.Value())

	orgs := org.Value()
	if orgs == nil {
		orgs = []string{user.SystemPrivilegedGroup}
	}

	cfg := kubeconfig.KubeConfig{
		CN:         cn,
		ORG:        orgs,
		ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
		AltNames:   certAltNames,
	}

	logrus.Infof("waiting for cluster to be available..")

	var kubeconfig *clientcmdapi.Config

	if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
		kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
		return err
	}); err != nil {
		return err
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
	if configName == "" {
		configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
	}

	pwd, err := os.Getwd()
@@ -160,11 +161,7 @@ func generate(clx *cli.Context) error {
		return err
	}

	if configName == "" {
		configName = cluster.Name + "-kubeconfig.yaml"
	}

	logrus.Infof(`You can start using the cluster with:
	logrus.Infof(`You can start using the cluster with:

	export KUBECONFIG=%s
	kubectl cluster-info
17
cli/cmds/policy.go
Normal file
@@ -0,0 +1,17 @@
package cmds

import (
	"github.com/urfave/cli/v2"
)

func NewPolicyCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:  "policy",
		Usage: "policy command",
		Subcommands: []*cli.Command{
			NewPolicyCreateCmd(appCtx),
			NewPolicyDeleteCmd(appCtx),
			NewPolicyListCmd(appCtx),
		},
	}
}
118
cli/cmds/policy_create.go
Normal file
@@ -0,0 +1,118 @@
package cmds

import (
	"context"
	"errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type VirtualClusterPolicyCreateConfig struct {
	mode string
}

func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command {
	config := &VirtualClusterPolicyCreateConfig{}

	createFlags := []cli.Flag{
		&cli.StringFlag{
			Name:        "mode",
			Usage:       "The allowed mode type of the policy",
			Destination: &config.mode,
			Value:       "shared",
			Action: func(ctx *cli.Context, value string) error {
				switch value {
				case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
					return nil
				default:
					return errors.New(`mode should be one of "shared" or "virtual"`)
				}
			},
		},
	}

	return &cli.Command{
		Name:            "create",
		Usage:           "Create new policy",
		UsageText:       "k3kcli policy create [command options] NAME",
		Action:          policyCreateAction(appCtx, config),
		Flags:           WithCommonFlags(appCtx, createFlags...),
		HideHelpCommand: true,
	}
}

func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		policyName := clx.Args().First()

		_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)

		return err
	}
}

func createNamespace(ctx context.Context, client client.Client, name, policyName string) error {
	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}

	if policyName != "" {
		ns.Labels = map[string]string{
			policy.PolicyNameLabelKey: policyName,
		}
	}

	if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil {
		if !apierrors.IsNotFound(err) {
			return err
		}

		logrus.Infof(`Creating namespace [%s]`, name)

		if err := client.Create(ctx, ns); err != nil {
			return err
		}
	}

	return nil
}

func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
	logrus.Infof("Creating policy [%s]", policyName)

	policy := &v1alpha1.VirtualClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: policyName,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "VirtualClusterPolicy",
			APIVersion: "k3k.io/v1alpha1",
		},
		Spec: v1alpha1.VirtualClusterPolicySpec{
			AllowedMode: mode,
		},
	}

	if err := client.Create(ctx, policy); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return nil, err
		}

		logrus.Infof("Policy [%s] already exists", policyName)
	}

	return policy, nil
}
61
cli/cmds/policy_delete.go
Normal file
@@ -0,0 +1,61 @@
package cmds

import (
	"context"
	"errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:            "delete",
		Usage:           "Delete an existing policy",
		UsageText:       "k3kcli policy delete [command options] NAME",
		Action:          policyDeleteAction(appCtx),
		Flags:           WithCommonFlags(appCtx),
		HideHelpCommand: true,
	}
}

func policyDeleteAction(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}

		namespace := appCtx.Namespace(name)

		logrus.Infof("Deleting policy in namespace [%s]", namespace)

		policy := &v1alpha1.VirtualClusterPolicy{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "default",
				Namespace: namespace,
			},
		}

		if err := client.Delete(ctx, policy); err != nil {
			if apierrors.IsNotFound(err) {
				logrus.Warnf("Policy not found in namespace [%s]", namespace)
			} else {
				return err
			}
		}

		return nil
	}
}
50
cli/cmds/policy_list.go
Normal file
@@ -0,0 +1,50 @@
package cmds

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/urfave/cli/v2"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/cli-runtime/pkg/printers"
)

func NewPolicyListCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:            "list",
		Usage:           "List all the existing policies",
		UsageText:       "k3kcli policy list [command options]",
		Action:          policyList(appCtx),
		Flags:           WithCommonFlags(appCtx),
		HideHelpCommand: true,
	}
}

func policyList(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() > 0 {
			return cli.ShowSubcommandHelp(clx)
		}

		var policies v1alpha1.VirtualClusterPolicyList
		if err := client.List(ctx, &policies); err != nil {
			return err
		}

		crd := &apiextensionsv1.CustomResourceDefinition{}
		if err := client.Get(ctx, types.NamespacedName{Name: "virtualclusterpolicies.k3k.io"}, crd); err != nil {
			return err
		}

		items := toPointerSlice(policies.Items)
		table := createTable(crd, items)

		printer := printers.NewTablePrinter(printers.PrintOptions{})

		return printer.PrintObj(table, clx.App.Writer)
	}
}
108
cli/cmds/root.go
@@ -7,61 +7,55 @@ import (
	"github.com/rancher/k3k/pkg/buildinfo"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	defaultNamespace = "default"
)
type AppContext struct {
	RestConfig *rest.Config
	Client     client.Client

var (
	Scheme = runtime.NewScheme()

	debug bool
	// Global flags
	Debug      bool
	Kubeconfig string
	namespace  string

	CommonFlags = []cli.Flag{
		&cli.StringFlag{
			Name:        "kubeconfig",
			Usage:       "kubeconfig path",
			Destination: &Kubeconfig,
			DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
		},
		&cli.StringFlag{
			Name:        "namespace",
			Usage:       "namespace to create the k3k cluster in",
			Destination: &namespace,
		},
	}
)

func init() {
	_ = clientgoscheme.AddToScheme(Scheme)
	_ = v1alpha1.AddToScheme(Scheme)
}

func NewApp() *cli.App {
	appCtx := &AppContext{}

	app := cli.NewApp()
	app.Name = "k3kcli"
	app.Usage = "CLI for K3K"
	app.Flags = []cli.Flag{
		&cli.BoolFlag{
			Name:        "debug",
			Usage:       "Turn on debug logs",
			Destination: &debug,
			EnvVars:     []string{"K3K_DEBUG"},
		},
	}
	app.Flags = WithCommonFlags(appCtx)

	app.Before = func(clx *cli.Context) error {
		if debug {
		if appCtx.Debug {
			logrus.SetLevel(logrus.DebugLevel)
		}

		restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
		if err != nil {
			return err
		}

		scheme := runtime.NewScheme()
		_ = clientgoscheme.AddToScheme(scheme)
		_ = v1alpha1.AddToScheme(scheme)
		_ = apiextensionsv1.AddToScheme(scheme)

		ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
		if err != nil {
			return err
		}

		appCtx.RestConfig = restConfig
		appCtx.Client = ctrlClient

		return nil
	}

@@ -71,30 +65,56 @@ func NewApp() *cli.App {
	}

	app.Commands = []*cli.Command{
		NewClusterCommand(),
		NewKubeconfigCommand(),
		NewClusterCmd(appCtx),
		NewPolicyCmd(appCtx),
		NewKubeconfigCmd(appCtx),
	}

	return app
}

func Namespace() string {
	if namespace == "" {
		return defaultNamespace
func (ctx *AppContext) Namespace(name string) string {
	if ctx.namespace != "" {
		return ctx.namespace
	}

	return namespace
	return "k3k-" + name
}

func loadRESTConfig() (*rest.Config, error) {
func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	configOverrides := &clientcmd.ConfigOverrides{}

	if Kubeconfig != "" {
		loadingRules.ExplicitPath = Kubeconfig
	if kubeconfig != "" {
		loadingRules.ExplicitPath = kubeconfig
	}

	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)

	return kubeConfig.ClientConfig()
}

func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag {
	commonFlags := []cli.Flag{
		&cli.BoolFlag{
			Name:        "debug",
			Usage:       "Turn on debug logs",
			Destination: &appCtx.Debug,
			EnvVars:     []string{"K3K_DEBUG"},
		},
		&cli.StringFlag{
			Name:        "kubeconfig",
			Usage:       "kubeconfig path",
			Destination: &appCtx.Kubeconfig,
			DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
		},
		&cli.StringFlag{
			Name:        "namespace",
			Usage:       "namespace to create the k3k cluster in",
			Aliases:     []string{"n"},
			Destination: &appCtx.namespace,
		},
	}

	return append(commonFlags, flags...)
}
103
cli/cmds/table_printer.go
Normal file
@@ -0,0 +1,103 @@
package cmds

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/util/jsonpath"
)

// createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning
func createTable[T runtime.Object](crd *apiextensionsv1.CustomResourceDefinition, objs []T) *metav1.Table {
	printerColumns := getPrinterColumnsFromCRD(crd)

	return &metav1.Table{
		TypeMeta:          metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"},
		ColumnDefinitions: convertToTableColumns(printerColumns),
		Rows:              createTableRows(objs, printerColumns),
	}
}

func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []apiextensionsv1.CustomResourceColumnDefinition {
	printerColumns := []apiextensionsv1.CustomResourceColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: "Name of the Resource", JSONPath: ".metadata.name"},
	}

	for _, version := range crd.Spec.Versions {
		if version.Name == "v1alpha1" {
			printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
			break
		}
	}

	return printerColumns
}

func convertToTableColumns(printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableColumnDefinition {
	var columnDefinitions []metav1.TableColumnDefinition

	for _, col := range printerColumns {
		columnDefinitions = append(columnDefinitions, metav1.TableColumnDefinition{
			Name:        col.Name,
			Type:        col.Type,
			Format:      col.Format,
			Description: col.Description,
			Priority:    col.Priority,
		})
	}

	return columnDefinitions
}

func createTableRows[T runtime.Object](objs []T, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableRow {
	var rows []metav1.TableRow

	for _, obj := range objs {
		objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj)
		if err != nil {
			rows = append(rows, metav1.TableRow{Cells: []any{"<error: " + err.Error() + ">"}})
			continue
		}

		rows = append(rows, metav1.TableRow{
			Cells:  buildRowCells(objMap, printerColumns),
			Object: runtime.RawExtension{Object: obj},
		})
	}

	return rows
}

func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []any {
	var cells []any

	for _, printCol := range printerColumns {
		j := jsonpath.New(printCol.Name)

		err := j.Parse("{" + printCol.JSONPath + "}")
		if err != nil {
			cells = append(cells, "<error>")
			continue
		}

		results, err := j.FindResults(objMap)
		if err != nil || len(results) == 0 || len(results[0]) == 0 {
			cells = append(cells, "<none>")
			continue
		}

		cells = append(cells, results[0][0].Interface())
	}

	return cells
}

func toPointerSlice[T any](v []T) []*T {
	var vPtr = make([]*T, len(v))

	for i := range v {
		vPtr[i] = &v[i]
	}

	return vPtr
}
@@ -122,7 +122,7 @@ You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.
* Ephemeral Storage:

```bash
k3kcli cluster create my-cluster --persistence-type ephemeral
k3kcli cluster create --persistence-type ephemeral my-cluster
```

*Important Notes:*
@@ -88,6 +88,25 @@ K3k consists of two main components:
* **CLI:** The K3k CLI provides a command-line interface for interacting with K3k. It allows users to easily create, manage, and access virtual clusters. The CLI simplifies common tasks such as creating `Cluster` CRs, retrieving kubeconfigs for accessing virtual clusters, and performing other management operations.


## VirtualClusterPolicy

K3k introduces the VirtualClusterPolicy Custom Resource, a way to set up and apply common configurations that control how your virtual clusters operate within the K3k environment.

The primary goal of VCPs is to allow administrators to centrally manage and apply consistent policies. This reduces repetitive configuration, helps meet organizational standards, and enhances the security and operational consistency of virtual clusters managed by K3k.

A VirtualClusterPolicy is bound to one or more Kubernetes Namespaces. Once bound, the rules defined in the VCP apply to all K3k virtual clusters that are running or get created in that Namespace. This allows for flexible policy application, meaning different Namespaces can use their own unique VCPs, while others can share a single VCP for a consistent setup.

Common use cases for administrators leveraging VirtualClusterPolicy include:

- Defining the operational mode (like "shared" or "virtual") for virtual clusters.
- Setting up resource quotas and limit ranges to effectively manage how many resources virtual clusters and their workloads can use.
- Enforcing security standards, for example, by configuring Pod Security Admission (PSA) labels for Namespaces.

The K3k controller actively monitors VirtualClusterPolicy resources and the corresponding Namespace bindings. When a VCP is applied or updated, the controller ensures that the defined configurations are enforced on the relevant virtual clusters and their associated resources within the targeted Namespaces.

For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).


## Comparison and Trade-offs

K3k offers two distinct modes for deploying virtual clusters: `shared` and `virtual`. Each mode has its own strengths and weaknesses, and the best choice depends on the specific needs and priorities of the user. Here's a comparison to help you make an informed decision:
@@ -8,6 +8,8 @@ k3kcli

```
[--debug]
[--kubeconfig]=[value]
[--namespace|-n]=[value]
```

**Usage**:

@@ -20,6 +22,10 @@ k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace to create the k3k cluster in


# COMMANDS

@@ -35,22 +41,30 @@ Create new cluster

**--agent-args**="": agents extra arguments

**--agent-envs**="": agents extra Envs

**--agents**="": number of agents (default: 0)

**--cluster-cidr**="": cluster CIDR

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--kubeconfig-server**="": override the kubeconfig server host

**--mode**="": k3k mode type (shared, virtual) (default: "shared")

**--namespace**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace to create the k3k cluster in

**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")

**--policy**="": The policy to create the cluster in

**--server-args**="": servers extra arguments

**--server-envs**="": servers extra Envs

**--servers**="": number of servers (default: 1)

**--service-cidr**="": service CIDR
@@ -67,9 +81,67 @@ Delete an existing cluster

>k3kcli cluster delete [command options] NAME

**--debug**: Turn on debug logs

**--keep-data**: keeps persistence volumes created for the cluster after deletion

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace to create the k3k cluster in

### list

List all the existing clusters

>k3kcli cluster list [command options]

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace to create the k3k cluster in

## policy

policy command

### create

Create new policy

>k3kcli policy create [command options] NAME

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--mode**="": The allowed mode type of the policy (default: "shared")

**--namespace, -n**="": namespace to create the k3k cluster in

### delete

Delete an existing policy

>k3kcli policy delete [command options] NAME

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace to create the k3k cluster in

### list

List all the existing policies

>k3kcli policy list [command options]

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace to create the k3k cluster in

## kubeconfig

@@ -85,6 +157,8 @@ Generate kubeconfig for clusters

**--config-name**="": the name of the generated kubeconfig file

**--debug**: Turn on debug logs

**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
@@ -93,6 +167,6 @@ Generate kubeconfig for clusters

**--name**="": cluster name

**--namespace**="": namespace to create the k3k cluster in
**--namespace, -n**="": namespace to create the k3k cluster in

**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig
@@ -1,9 +1,4 @@
processor:
  # RE2 regular expressions describing types that should be excluded from the generated documentation.
  ignoreTypes:
    - ClusterSet
    - ClusterSetList

  # RE2 regular expressions describing type fields that should be excluded from the generated documentation.
  ignoreFields:
    - "status$"
@@ -10,6 +10,8 @@
### Resource Types
- [Cluster](#cluster)
- [ClusterList](#clusterlist)
- [VirtualClusterPolicy](#virtualclusterpolicy)
- [VirtualClusterPolicyList](#virtualclusterpolicylist)


@@ -51,23 +53,6 @@ _Appears in:_
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |


#### ClusterLimit



ClusterLimit defines resource limits for server and agent nodes.



_Appears in:_
- [ClusterSpec](#clusterspec)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |


#### ClusterList


@@ -97,6 +82,7 @@ _Validation:_

_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)


@@ -124,12 +110,15 @@ _Appears in:_
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit defines resource limits for server/agent nodes. | | |
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
| `serverEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | ServerEnvs specifies list of environment variables to set in the server pod. | | |
| `agentEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | AgentEnvs specifies list of environment variables to set in the agent pod. | | |
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |



@@ -180,6 +169,10 @@ LoadBalancerConfig specifies options for exposing the API server through a LoadB
_Appears in:_
- [ExposeConfig](#exposeconfig)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverPort` _integer_ | ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.<br />If not specified, the default https 443 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |
| `etcdPort` _integer_ | ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.<br />If not specified, the default etcd 2379 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |


#### NodePortConfig
@@ -195,9 +188,8 @@ _Appears in:_

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |


#### PersistenceConfig
@@ -232,5 +224,79 @@ _Appears in:_



#### PodSecurityAdmissionLevel

_Underlying type:_ _string_

PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.

_Validation:_
- Enum: [privileged baseline restricted]

_Appears in:_
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)



#### VirtualClusterPolicy



VirtualClusterPolicy allows defining common configurations and constraints
for clusters within a clusterpolicy.



_Appears in:_
- [VirtualClusterPolicyList](#virtualclusterpolicylist)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |


#### VirtualClusterPolicyList



VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.




| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |


#### VirtualClusterPolicySpec



VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.



_Appears in:_
- [VirtualClusterPolicy](#virtualclusterpolicy)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `quota` _[ResourceQuotaSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcequotaspec-v1-core)_ | Quota specifies the resource limits for clusters within a clusterpolicy. | | |
| `limit` _[LimitRangeSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core)_ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy<br />to set defaults and constraints (min/max) | | |
| `defaultNodeSelector` _object (keys:string, values:string)_ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. | | |
| `defaultPriorityClass` _string_ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. | | |
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |

@@ -33,7 +33,7 @@ To see all the available Make commands you can run `make help`, i.e:

```
-> % make help
all                  Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
all                  Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'
version              Print the current version
build                Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
package              Package the k3k and k3k-kubelet Docker images
@@ -42,8 +42,8 @@ To see all the available Make commands you can run `make help`, i.e:
test-unit            Run the unit tests (skips the e2e)
test-controller      Run the controller tests (pkg/controller)
test-e2e             Run the e2e tests
build-crds           Build the CRDs specs
docs                 Build the CRDs docs
generate             Generate the CRDs specs
docs                 Build the CRDs and CLI docs
lint                 Find any linting issues in the project
validate             Validate the project checking for any dependency or doc mismatch
install              Install K3k with Helm on the targeted Kubernetes cluster
@@ -88,7 +88,7 @@ The required binaries for `envtest` are installed with [`setup-envtest`](https:/

## CRDs and Docs

We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make generate`.

Remember also to update the CRDs documentation running the `make docs` command.

@@ -114,7 +114,7 @@ Install now k3k as usual:

```bash
helm repo update
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
```

### Create a virtual cluster
83
docs/howtos/airgap.md
Normal file
@@ -0,0 +1,83 @@
# K3k Air Gap Installation Guide

Applicable K3k modes: `virtual`, `shared`

This guide describes how to deploy **K3k** in an **air-gapped environment**, including the packaging of required images, Helm chart configurations, and cluster creation using a private container registry.

---

## 1. Package Required Container Images

### 1.1: Follow K3s Air Gap Preparation

Begin with the official K3s air gap packaging instructions:
[K3s Air Gap Installation Docs](https://docs.k3s.io/installation/airgap)

### 1.2: Include K3k-Specific Images

In addition to the K3s images, make sure to include the following in your image bundle:

| Image Names                 | Descriptions                                                    |
| --------------------------- | --------------------------------------------------------------- |
| `rancher/k3k:<tag>`         | K3k controller image (replace `<tag>` with the desired version) |
| `rancher/k3k-kubelet:<tag>` | K3k agent image for shared mode                                 |
| `rancher/k3s:<tag>`         | K3s server/agent image for virtual clusters                     |

Load these images into your internal (air-gapped) registry (see the sketch below).
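
A minimal sketch of that mirroring step with the Docker CLI. The `v0.3.3` tag and the `registry.internal.domain` registry are placeholders for your own versions and registry, not values prescribed by this guide:

```bash
# On a connected machine: pull, retag for the internal registry, and push.
docker pull rancher/k3k:v0.3.3
docker tag rancher/k3k:v0.3.3 registry.internal.domain/rancher/k3k:v0.3.3
docker push registry.internal.domain/rancher/k3k:v0.3.3

# For a fully disconnected transfer, move the images as a tarball instead:
docker save -o k3k-images.tar rancher/k3k:v0.3.3 rancher/k3k-kubelet:v0.3.3
docker load -i k3k-images.tar   # run on the air-gapped side, then retag and push
```

Repeat for each image and tag in the table above.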

---

## 2. Configure Helm Chart for Air Gap Installation

Update the `values.yaml` file in the K3k Helm chart with air gap settings:

```yaml
image:
  repository: rancher/k3k
  tag: ""         # Specify the version tag
  pullPolicy: ""  # Optional: "IfNotPresent", "Always", etc.

sharedAgent:
  image:
    repository: rancher/k3k-kubelet
    tag: ""         # Specify the version tag
    pullPolicy: ""  # Optional

k3sServer:
  image:
    repository: rancher/k3s
    pullPolicy: ""  # Optional
```

These values enforce the use of internal image repositories for the K3k controller, the agent, and the server.

**Note**: All virtual clusters will automatically use these settings.

---

## 3. Enforce Registry in Virtual Clusters

When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry:

```bash
k3kcli cluster create \
  --server-args "--system-default-registry=registry.internal.domain" \
  my-cluster
```

This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls.
[K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help)

---

## 4. Specify K3s Version for Virtual Clusters

K3k allows specifying the K3s version used in each virtual cluster:

```bash
k3kcli cluster create \
  --k3s-version v1.29.4+k3s1 \
  my-cluster
```

- If omitted, the **host cluster's K3s version** is used by default, which might not exist in your registry if it's not part of the air gap package.
79
docs/howtos/choose-mode.md
Normal file
@@ -0,0 +1,79 @@
# How to Choose Between Shared and Virtual Mode

This guide helps you choose the right mode for your virtual cluster: **Shared** or **Virtual**.
If you're unsure, start with **Shared mode** — it's the default and fits most common scenarios.

---

## Shared Mode (default)

**Best for:**
- Developers who want to run workloads quickly without managing Kubernetes internals
- Platform teams that require visibility and control over all workloads
- Users who need access to host-level resources (e.g., GPUs)

In **Shared mode**, the virtual cluster runs its own K3s server but relies on the host to execute workloads. The virtual kubelet syncs resources, enabling lightweight, fast provisioning with support for cluster resource isolation. More details on the [architecture](./../architecture.md#shared-mode).

---

### Use Cases by Persona

#### 👩‍💻 Developer
*"I’m building a web app that should be exposed outside the virtual cluster."*
→ Use **Shared mode**. It allows you to [expose](./expose-workloads.md) your application.

#### 👩‍🔬 Data Scientist
*“I need to run Jupyter notebooks that leverage the cluster's GPU.”*
→ Use **Shared mode**. It gives access to physical devices while keeping overhead low.

#### 🧑‍💼 Platform Admin
*"I want to monitor and secure all tenant workloads from a central location."*
→ Use **Shared mode**. Host-level agents (e.g., observability, policy enforcement) work across all virtual clusters.

#### 🔒 Security Engineer
*"I need to enforce security policies like network policies or runtime scanning across all workloads."*
→ Use **Shared mode**. The platform can enforce policies globally without tenant bypass.

*"I need to test a new admission controller or policy engine."*
→ Use **Shared mode**, if it's scoped to your virtual cluster. You can run tools like Kubewarden without affecting the host.

#### 🔁 CI/CD Engineer
*"I want to spin up disposable virtual clusters per pipeline run, fast and with low resource cost."*
→ Use **Shared mode**. It's quick to provision and ideal for short-lived, namespace-scoped environments.

---

## Virtual Mode

**Best for:**
- Advanced users who need full Kubernetes isolation
- Developers testing experimental or cluster-wide features
- Use cases requiring control over the entire Kubernetes control plane

In **Virtual mode**, the virtual cluster runs its own isolated Kubernetes control plane. It supports different CNIs and API configurations — ideal for deep experimentation or advanced workloads. More details on the [architecture](./../architecture.md#virtual-mode).

---

### Use Cases by Persona

#### 👩‍💻 Developer
*"I need to test a new Kubernetes feature gate that’s disabled in the host cluster."*
→ Use **Virtual mode**. You can configure your own control plane flags and API features.

#### 🧑‍💼 Platform Admin
*"We’re testing upgrades across Kubernetes versions, including new API behaviors."*
→ Use **Virtual mode**. You can run different Kubernetes versions and safely validate upgrade paths.

#### 🌐 Network Engineer
*"I’m evaluating a new CNI that needs full control of the cluster’s networking."*
→ Use **Virtual mode**. You can run a separate CNI stack without affecting the host or other tenants.

#### 🔒 Security Engineer
*"I’m testing a new admission controller and policy engine before rolling it out cluster-wide."*
→ Use **Virtual mode**, if you need to test cluster-wide policies, custom admission flow, or advanced extensions with full control.

---

## Still Not Sure?

If you're evaluating more advanced use cases or want a deeper comparison, see the full trade-off breakdown in the [Architecture documentation](../architecture.md).
52
docs/howtos/expose-workloads.md
Normal file
@@ -0,0 +1,52 @@
# How-to: Expose Workloads Outside the Virtual Cluster

This guide explains how to expose workloads running in k3k-managed virtual clusters to external networks. Behavior varies depending on the operating mode of the virtual cluster.

## Virtual Mode

> [!CAUTION]
> **Not Supported**
> In *virtual mode*, direct external exposure of workloads is **not available**.
> This mode is designed for strong isolation and does not expose the virtual cluster's network directly.

## Shared Mode

In *shared mode*, workloads can be exposed to the external network using standard Kubernetes service types or an ingress controller, depending on your requirements.

> [!NOTE]
> *`Services`* are always synced from the virtual cluster to the host cluster following the same principle described [here](../architecture.md#shared-mode) for pods.

### Option 1: Use `NodePort` or `LoadBalancer`

To expose a service such as a web application outside the host cluster:

- **`NodePort`**:
  Exposes the service on a static port on each node’s IP.
  Access the service at `http://<NodeIP>:<NodePort>`.

- **`LoadBalancer`**:
  Provisions an external load balancer (if supported by the environment) and exposes the service via the load balancer’s IP.

> **Note**
> The `LoadBalancer` IP is currently not reflected back to the virtual cluster service.
> [k3k issue #365](https://github.com/rancher/k3k/issues/365)
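
As a minimal sketch, assuming a Deployment named `my-app` listening on port 8080 already exists in the virtual cluster (both names are hypothetical), a `NodePort` service can be created with standard `kubectl`:

```bash
# Run against the virtual cluster's kubeconfig, not the host's.
export KUBECONFIG=./my-cluster-kubeconfig.yaml

# Create a NodePort service in front of the (hypothetical) my-app deployment.
kubectl expose deployment my-app --type=NodePort --port=80 --target-port=8080

# Find the allocated node port, then reach the app at http://<NodeIP>:<NodePort>.
kubectl get service my-app
```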

### Option 2: Use `ClusterIP` for Internal Communication

If the workload should only be accessible to other services or pods *within* the host cluster:

- Use the `ClusterIP` service type.
  This exposes the service on an internal IP, only reachable inside the host cluster.

### Option 3: Use Ingress for HTTP/HTTPS Routing

For more advanced routing (e.g., hostname- or path-based routing), deploy an **Ingress controller** in the virtual cluster, and expose it via `NodePort` or `LoadBalancer`; a possible starting point is sketched after this section.

This allows you to:

- Define Ingress resources in the virtual cluster.
- Route external traffic to services within the virtual cluster.

> **Note**
> Support for using the host cluster's Ingress controller from a virtual cluster is being tracked in
> [k3k issue #356](https://github.com/rancher/k3k/issues/356)
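
One way this could look, assuming you use the community `ingress-nginx` chart and publish the controller itself through a `NodePort` service (the chart repository URL and `controller.service.type` value are those of the upstream `ingress-nginx` project and may change between chart versions):

```bash
# Run against the virtual cluster's kubeconfig.
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update

# Install the controller and expose it on a node port of the host nodes.
helm install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.type=NodePort
```

Ingress resources defined in the virtual cluster are then served through the controller's node port.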
147
docs/virtualclusterpolicy.md
Normal file
@@ -0,0 +1,147 @@
# VirtualClusterPolicy

The VirtualClusterPolicy Custom Resource in K3k provides a way to define and enforce consistent configurations, security settings, and resource management rules for your virtual clusters and the Namespaces they operate within.

By using VCPs, administrators can centrally manage these aspects, reducing manual configuration, ensuring alignment with organizational standards, and enhancing the overall security and operational consistency of the K3k environment.

## Core Concepts

### What is a VirtualClusterPolicy?

A `VirtualClusterPolicy` is a cluster-scoped Kubernetes Custom Resource that specifies a set of rules and configurations. These policies are then applied to K3k virtual clusters (`Cluster` resources) operating within Kubernetes Namespaces that are explicitly bound to a VCP.

### Binding a Policy to a Namespace

To apply a `VirtualClusterPolicy` to one or more Namespaces (and thus to all K3k `Cluster` resources within those Namespaces), you need to label the desired Namespace(s). Add the following label to your Namespace metadata:

`policy.k3k.io/policy-name: <YOUR_POLICY_NAME>`

**Example: Labeling a Namespace**

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-app-namespace
  labels:
    policy.k3k.io/policy-name: "standard-dev-policy"
```

In this example, `my-app-namespace` will adhere to the rules defined in the `VirtualClusterPolicy` named `standard-dev-policy`. Multiple Namespaces can be bound to the same policy for uniform configuration, or different Namespaces can be bound to distinct policies.

It's also important to note what happens when a Namespace's policy binding changes. If a Namespace is unbound from a VirtualClusterPolicy (by removing the `policy.k3k.io/policy-name` label), K3k will clean up and remove the resources (such as ResourceQuotas, LimitRanges, and managed Namespace labels) that were originally applied by that policy. Similarly, if the label is changed to bind the Namespace to a new VirtualClusterPolicy, K3k will first remove the resources associated with the old policy before applying the configurations from the new one, ensuring a clean transition.
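
For instance, with a hypothetical namespace and policy names and standard `kubectl` label syntax, binding, re-binding, and unbinding look like:

```bash
# Bind the namespace to a policy.
kubectl label namespace my-app-namespace policy.k3k.io/policy-name=standard-dev-policy

# Re-bind to a different policy (--overwrite replaces the existing value).
kubectl label namespace my-app-namespace policy.k3k.io/policy-name=stricter-policy --overwrite

# Unbind: the trailing dash removes the label and triggers the cleanup described above.
kubectl label namespace my-app-namespace policy.k3k.io/policy-name-
```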

### Default Policy Values

If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g., using `k3kcli policy create my-default-policy`), it will be created with default settings. Currently, this includes `spec.allowedMode` being set to `"shared"`.

```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: my-default-policy
spec:
  allowedMode: shared
```

## Key Capabilities & Examples

A `VirtualClusterPolicy` can configure several aspects of the Namespaces it's bound to and the virtual clusters operating within them.

### 1. Restricting Allowed Virtual Cluster Modes (`allowedMode`)

You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster` resources can be provisioned within bound Namespaces. If a `Cluster` is created in a bound Namespace with a mode not allowed by `allowedMode`, its creation might proceed but an error should be reported in the `Cluster` resource's status.

**Example:** Allow only "shared" mode clusters.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: shared-only-policy
spec:
  allowedMode: shared
```

You can also specify this using the CLI: `k3kcli policy create --mode shared shared-only-policy` (or `--mode virtual`).

### 2. Defining Resource Quotas (`quota`)

You can define resource consumption limits for bound Namespaces by specifying a `ResourceQuota`. K3k will create a `ResourceQuota` object in each bound Namespace with the provided specifications.

**Example:** Set CPU, memory, and pod limits.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: quota-policy
spec:
  quota:
    hard:
      cpu: "10"
      memory: "20Gi"
      pods: "10"
```

### 3. Setting Limit Ranges (`limit`)

You can define default resource requests/limits and min/max constraints for containers running in bound Namespaces by specifying a `LimitRange`. K3k will create a `LimitRange` object in each bound Namespace.

**Example:** Define default CPU requests/limits and min/max CPU.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: limit-policy
spec:
  limit:
    limits:
    - default:
        cpu: "500m"
      defaultRequest:
        cpu: "500m"
      max:
        cpu: "1"
      min:
        cpu: "100m"
      type: Container
```

### 4. Managing Network Isolation (`disableNetworkPolicy`)

By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network isolation for virtual clusters (especially in shared mode). You can disable the creation of this default policy.

**Example:** Disable the default NetworkPolicy.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: no-default-netpol-policy
spec:
  disableNetworkPolicy: true
```

### 5. Enforcing Pod Security Admission (`podSecurityAdmissionLevel`)

You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admission (PSA) level. K3k will apply the corresponding PSA labels to each bound Namespace. The allowed values are `privileged`, `baseline`, and `restricted`, and this will add labels like `pod-security.kubernetes.io/enforce: <level>` to the bound Namespace.

**Example:** Enforce the "baseline" PSS level.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: baseline-psa-policy
spec:
  podSecurityAdmissionLevel: baseline
```

## Further Reading

* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
* To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document.
@@ -1,11 +1,9 @@
apiVersion: k3k.io/v1alpha1
kind: ClusterSet
kind: VirtualClusterPolicy
metadata:
  name: clusterset-example
  name: policy-example
# spec:
#   disableNetworkPolicy: false
#   allowedNodeTypes:
#   - "shared"
#   - "virtual"
#   allowedMode: "shared"
#   podSecurityAdmissionLevel: "baseline"
#   defaultPriorityClass: "lowpriority"
8
go.mod
@@ -1,6 +1,6 @@
module github.com/rancher/k3k

go 1.23.4
go 1.24.2

replace (
	github.com/google/cel-go => github.com/google/cel-go v0.17.7
@@ -28,11 +28,14 @@ require (
	gopkg.in/yaml.v2 v2.4.0
	helm.sh/helm/v3 v3.14.4
	k8s.io/api v0.29.11
	k8s.io/apiextensions-apiserver v0.29.11
	k8s.io/apimachinery v0.29.11
	k8s.io/apiserver v0.29.11
	k8s.io/cli-runtime v0.29.11
	k8s.io/client-go v0.29.11
	k8s.io/component-base v0.29.11
	k8s.io/component-helpers v0.29.11
	k8s.io/kubectl v0.29.11
	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
	sigs.k8s.io/controller-runtime v0.17.5
)
@@ -202,12 +205,9 @@ require (
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.29.11 // indirect
	k8s.io/cli-runtime v0.29.11 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kms v0.29.11 // indirect
	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
	k8s.io/kubectl v0.29.11 // indirect
	oras.land/oras-go v1.2.5 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect

@@ -1,28 +0,0 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

set -x
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
git clone --depth 1 ${CODEGEN_GIT_PKG} || true

K8S_VERSION=$(cat go.mod | grep -m1 "k8s.io/apiserver" | cut -d " " -f 2)
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=./code-generator

# cd into the git dir to checkout the code gen version compatible with the k8s version that this is using
cd $CODEGEN_PKG
git fetch origin tag ${K8S_VERSION}
git checkout ${K8S_VERSION}
cd -

source ${CODEGEN_PKG}/kube_codegen.sh

kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
--input-pkg-root "${SCRIPT_ROOT}/pkg/apis" \
--output-base "${SCRIPT_ROOT}/pkg/apis"

rm -rf code-generator

@@ -336,7 +336,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
return nil, err
}

url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
url := "https://" + server.ServiceName(cluster.Name)

kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
if err != nil {

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
@@ -16,6 +17,7 @@ import (
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
@@ -31,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/utils/ptr"

"errors"

@@ -40,6 +43,7 @@ import (
"k8s.io/client-go/transport/spdy"
compbasemetrics "k8s.io/component-base/metrics"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
)

@@ -365,7 +369,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {

// setting the hostname for the pod if its not set
if pod.Spec.Hostname == "" {
tPod.Spec.Hostname = pod.Name
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
}

// if the priorityCluss for the virtual cluster is set then override the provided value
@@ -398,6 +402,11 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)

// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
return err
}

return p.HostClient.Create(ctx, tPod)
}

@@ -480,18 +489,22 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
}

source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
}
}
} else if volume.PersistentVolumeClaim != nil {
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
} else if volume.DownwardAPI != nil {
for _, downwardAPI := range volume.DownwardAPI.Items {
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}
if downwardAPI.FieldRef != nil {
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}

if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
}
}
}
}
@@ -602,6 +615,10 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations

// in the virtual cluster we can update also the labels and annotations
maps.Copy(currentHostPod.Annotations, pod.Annotations)
maps.Copy(currentHostPod.Labels, pod.Labels)

if err := p.HostClient.Update(ctx, &currentHostPod); err != nil {
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
}
@@ -805,7 +822,7 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
})

// injecting cluster DNS IP to the pods except for coredns pod
if !strings.HasPrefix(podName, "coredns") {
if !strings.HasPrefix(podName, "coredns") && pod.Spec.DNSConfig == nil {
pod.Spec.DNSPolicy = corev1.DNSNone
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
Nameservers: []string{
@@ -816,17 +833,20 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
"svc.cluster.local",
"cluster.local",
},
Options: []v1.PodDNSConfigOption{
{
Name: "ndots",
Value: ptr.To("5"),
},
},
}
}

updatedEnvVars := []corev1.EnvVar{
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
{Name: "KUBERNETES_SERVICE_PORT", Value: "6443"},
{Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: "6443"},
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":443"},
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":443"},
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
{Name: "KUBERNETES_PORT_443_TCP_PORT", Value: "6443"},
}

// inject networking information to the pod's environment variables

37
main.go
@@ -1,4 +1,4 @@
//go:generate ./hack/update-codegen.sh
//go:generate ./scripts/generate
package main

import (
@@ -12,7 +12,7 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
@@ -31,6 +31,8 @@ var (
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
k3SImage string
k3SImagePullPolicy string
debug bool
logger *log.Logger
flags = []cli.Flag{
@@ -43,7 +45,7 @@ var (
&cli.StringFlag{
Name: "cluster-cidr",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
Usage: "Cluster CIDR to be added to the networkpolicy",
Destination: &clusterCIDR,
},
&cli.StringFlag{
@@ -65,6 +67,19 @@ var (
Usage: "Debug level logging",
Destination: &debug,
},
&cli.StringFlag{
Name: "k3s-image",
EnvVars: []string{"K3S_IMAGE"},
Usage: "K3K server image",
Value: "rancher/k3k",
Destination: &k3SImage,
},
&cli.StringFlag{
Name: "k3s-image-pull-policy",
EnvVars: []string{"K3S_IMAGE_PULL_POLICY"},
Usage: "K3K server image pull policy",
Destination: &k3SImagePullPolicy,
},
}
)

@@ -115,7 +130,7 @@ func run(clx *cli.Context) error {

logger.Info("adding cluster controller")

if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}

@@ -125,18 +140,10 @@ func run(clx *cli.Context) error {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}

logger.Info("adding clusterset controller")
logger.Info("adding clusterpolicy controller")

if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterset controller: %v", err)
}

if clusterCIDR == "" {
logger.Info("adding networkpolicy node controller")

if err := clusterset.AddNodeController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
}
if err := policy.Add(mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
}

if err := mgr.Start(ctx); err != nil {

@@ -21,8 +21,8 @@ func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{},
&ClusterSet{},
&ClusterSetList{},
&VirtualClusterPolicy{},
&VirtualClusterPolicyList{},
)
metav1.AddToGroupVersion(s, SchemeGroupVersion)

@@ -10,6 +10,7 @@ import (
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.mode",name=Mode,type=string

// Cluster defines a virtual Kubernetes cluster managed by k3k.
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
@@ -114,11 +115,6 @@ type ClusterSpec struct {
// +optional
PriorityClass string `json:"priorityClass,omitempty"`

// Limit defines resource limits for server/agent nodes.
//
// +optional
Limit *ClusterLimit `json:"clusterLimit,omitempty"`

// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
// The Secret must have a "token" field in its data.
//
@@ -142,10 +138,30 @@ type ClusterSpec struct {
// +optional
AgentArgs []string `json:"agentArgs,omitempty"`

// ServerEnvs specifies list of environment variables to set in the server pod.
//
// +optional
ServerEnvs []v1.EnvVar `json:"serverEnvs,omitempty"`

// AgentEnvs specifies list of environment variables to set in the agent pod.
//
// +optional
AgentEnvs []v1.EnvVar `json:"agentEnvs,omitempty"`

// Addons specifies secrets containing raw YAML to deploy on cluster startup.
//
// +optional
Addons []Addon `json:"addons,omitempty"`

// ServerLimit specifies resource limits for server nodes.
//
// +optional
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`

// WorkerLimit specifies resource limits for agent nodes.
//
// +optional
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}

// ClusterMode is the possible provisioning mode of a Cluster.
@@ -175,15 +191,6 @@ const (
DynamicPersistenceMode = PersistenceMode("dynamic")
)

// ClusterLimit defines resource limits for server and agent nodes.
type ClusterLimit struct {
// ServerLimit specifies resource limits for server nodes.
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`

// WorkerLimit specifies resource limits for agent nodes.
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}

// Addon specifies a Secret containing YAML to be deployed on cluster startup.
type Addon struct {
// SecretNamespace is the namespace of the Secret.
@@ -245,24 +252,34 @@ type IngressConfig struct {
}

// LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
type LoadBalancerConfig struct{}

// NodePortConfig specifies options for exposing the API server through NodePort.
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
type LoadBalancerConfig struct {
// ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.
// If not specified, the default https 443 port will be allocated.
// If 0 or negative, the port will not be exposed.
//
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`

// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
// ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.
// If not specified, the default etcd 2379 port will be allocated.
// If 0 or negative, the port will not be exposed.
//
// +optional
ServicePort *int32 `json:"servicePort,omitempty"`
ETCDPort *int32 `json:"etcdPort,omitempty"`
}

// NodePortConfig specifies options for exposing the API server through NodePort.
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.
// If not specified, a random port between 30000-32767 will be allocated.
// If out of range, the port will not be exposed.
//
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`

// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
// If not specified, a random port between 30000-32767 will be allocated.
// If out of range, the port will not be exposed.
//
// +optional
ETCDPort *int32 `json:"etcdPort,omitempty"`
@@ -317,54 +334,56 @@ type ClusterList struct {
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:JSONPath=".spec.allowedMode",name=Mode,type=string
// +kubebuilder:resource:scope=Cluster,shortName=vcp

// ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
// It allows defining common configurations and constraints for the clusters within the set.
type ClusterSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// VirtualClusterPolicy allows defining common configurations and constraints
// for clusters within a clusterpolicy.
type VirtualClusterPolicy struct {
metav1.ObjectMeta `json:"metadata"`
metav1.TypeMeta `json:",inline"`

// Spec defines the desired state of the ClusterSet.
// Spec defines the desired state of the VirtualClusterPolicy.
//
// +kubebuilder:default={}
Spec ClusterSetSpec `json:"spec"`
Spec VirtualClusterPolicySpec `json:"spec"`

// Status reflects the observed state of the ClusterSet.
// Status reflects the observed state of the VirtualClusterPolicy.
//
// +optional
Status ClusterSetStatus `json:"status,omitempty"`
Status VirtualClusterPolicyStatus `json:"status"`
}

// ClusterSetSpec defines the desired state of a ClusterSet.
type ClusterSetSpec struct {
// VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
type VirtualClusterPolicySpec struct {

// DefaultLimits specifies the default resource limits for servers/agents when a cluster in the set doesn't provide any.
// Quota specifies the resource limits for clusters within a clusterpolicy.
//
// +optional
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`

// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
// Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
// to set defaults and constraints (min/max)
//
// +optional
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`

// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
//
// +optional
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`

// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace.
//
// +optional
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`

// MaxLimits specifies the maximum resource limits that apply to all clusters (server + agent) in the set.
// AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
//
// +optional
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`

// AllowedNodeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
//
// +kubebuilder:default={shared}
// +kubebuilder:default=shared
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +kubebuilder:validation:MinItems=1
// +optional
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
AllowedMode ClusterMode `json:"allowedMode,omitempty"`

// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
//
@@ -393,8 +412,8 @@ const (
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
)

// ClusterSetStatus reflects the observed state of a ClusterSet.
type ClusterSetStatus struct {
// VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
type VirtualClusterPolicyStatus struct {
// ObservedGeneration was the generation at the time the status was updated.
//
// +optional
@@ -421,10 +440,10 @@ type ClusterSetStatus struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// ClusterSetList is a list of ClusterSet resources.
type ClusterSetList struct {
// VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
type VirtualClusterPolicyList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`

Items []ClusterSet `json:"items"`
Items []VirtualClusterPolicy `json:"items"`
}

@@ -1,20 +1,18 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Code generated by deepcopy-gen. DO NOT EDIT.
// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
@@ -34,7 +32,6 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
@@ -55,36 +52,6 @@ func (in *Cluster) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLimit) DeepCopyInto(out *ClusterLimit) {
*out = *in
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLimit.
func (in *ClusterLimit) DeepCopy() *ClusterLimit {
if in == nil {
return nil
}
out := new(ClusterLimit)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
@@ -97,7 +64,6 @@ func (in *ClusterList) DeepCopyInto(out *ClusterList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
@@ -118,135 +84,6 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSet) DeepCopyInto(out *ClusterSet) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet.
func (in *ClusterSet) DeepCopy() *ClusterSet {
if in == nil {
return nil
}
out := new(ClusterSet)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList.
func (in *ClusterSetList) DeepCopy() *ClusterSetList {
if in == nil {
return nil
}
out := new(ClusterSetList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
*out = *in
if in.MaxLimits != nil {
in, out := &in.MaxLimits, &out.MaxLimits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.DefaultLimits != nil {
in, out := &in.DefaultLimits, &out.DefaultLimits
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AllowedNodeTypes != nil {
in, out := &in.AllowedNodeTypes, &out.AllowedNodeTypes
*out = make([]ClusterMode, len(*in))
copy(*out, *in)
}
if in.PodSecurityAdmissionLevel != nil {
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec.
func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec {
if in == nil {
return nil
}
out := new(ClusterSetSpec)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus.
func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus {
if in == nil {
return nil
}
out := new(ClusterSetStatus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = *in
@@ -260,6 +97,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(int32)
**out = **in
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -267,16 +110,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.TokenSecretRef != nil {
in, out := &in.TokenSecretRef, &out.TokenSecretRef
*out = new(v1.SecretReference)
**out = **in
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -287,23 +130,39 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
if in.ServerEnvs != nil {
in, out := &in.ServerEnvs, &out.ServerEnvs
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AgentEnvs != nil {
in, out := &in.AgentEnvs, &out.AgentEnvs
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -325,7 +184,6 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -349,14 +207,13 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
if in.LoadBalancer != nil {
in, out := &in.LoadBalancer, &out.LoadBalancer
*out = new(LoadBalancerConfig)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposeConfig.
@@ -379,7 +236,6 @@ func (in *IngressConfig) DeepCopyInto(out *IngressConfig) {
(*out)[key] = val
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfig.
@@ -395,7 +251,16 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
*out = *in
return
if in.ServerPort != nil {
in, out := &in.ServerPort, &out.ServerPort
*out = new(int32)
**out = **in
}
if in.ETCDPort != nil {
in, out := &in.ETCDPort, &out.ETCDPort
*out = new(int32)
**out = **in
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfig.
@@ -416,17 +281,11 @@ func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
*out = new(int32)
**out = **in
}
if in.ServicePort != nil {
in, out := &in.ServicePort, &out.ServicePort
*out = new(int32)
**out = **in
}
if in.ETCDPort != nil {
in, out := &in.ETCDPort, &out.ETCDPort
*out = new(int32)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig.
@@ -447,7 +306,6 @@ func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
*out = new(string)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
@@ -459,3 +317,121 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicy.
func (in *VirtualClusterPolicy) DeepCopy() *VirtualClusterPolicy {
if in == nil {
return nil
}
out := new(VirtualClusterPolicy)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualClusterPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicyList) DeepCopyInto(out *VirtualClusterPolicyList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VirtualClusterPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicyList.
func (in *VirtualClusterPolicyList) DeepCopy() *VirtualClusterPolicyList {
if in == nil {
return nil
}
out := new(VirtualClusterPolicyList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualClusterPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec) {
*out = *in
if in.Quota != nil {
in, out := &in.Quota, &out.Quota
*out = new(v1.ResourceQuotaSpec)
(*in).DeepCopyInto(*out)
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(v1.LimitRangeSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.PodSecurityAdmissionLevel != nil {
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.
func (in *VirtualClusterPolicySpec) DeepCopy() *VirtualClusterPolicySpec {
if in == nil {
return nil
}
out := new(VirtualClusterPolicySpec)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicyStatus) DeepCopyInto(out *VirtualClusterPolicyStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicyStatus.
func (in *VirtualClusterPolicyStatus) DeepCopy() *VirtualClusterPolicyStatus {
if in == nil {
return nil
}
out := new(VirtualClusterPolicyStatus)
in.DeepCopyInto(out)
return out
}

@@ -6,6 +6,7 @@ import (

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -41,14 +42,21 @@ func configSecretName(clusterName string) string {
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)

result, err := controllerutil.CreateOrUpdate(ctx, cfg.client, obj, func() error {
return controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme)
})
key := ctrlruntimeclient.ObjectKeyFromObject(obj)

if result != controllerutil.OperationResultNone {
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key, "result", result)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)

if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
}

return err
if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
return cfg.client.Update(ctx, obj)
}

return err
}

return nil
}

@@ -193,7 +193,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
"--config",
sharedKubeletConfigPath,
},
Env: []v1.EnvVar{
Env: append([]v1.EnvVar{
{
Name: "AGENT_HOSTNAME",
ValueFrom: &v1.EnvVarSource{
@@ -203,7 +203,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
},
},
}, s.cluster.Spec.AgentEnvs...),
VolumeMounts: []v1.VolumeMount{
{
Name: "config",

@@ -20,15 +20,19 @@ const (

type VirtualAgent struct {
*Config
serviceIP string
token string
serviceIP string
token string
k3SImage string
k3SImagePullPolicy string
}

func NewVirtualAgent(config *Config, serviceIP, token string) *VirtualAgent {
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
return &VirtualAgent{
Config: config,
serviceIP: serviceIP,
token: token,
Config: config,
serviceIP: serviceIP,
token: token,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
}
}

@@ -72,13 +76,13 @@ func (v *VirtualAgent) config(ctx context.Context) error {
}

func virtualAgentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
return fmt.Sprintf(`server: https://%s
token: %s
with-node-id: true`, serviceIP, token)
}

func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster)
image := controller.K3SImage(v.cluster, v.k3SImage)

const name = "k3k-agent"

@@ -175,8 +179,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},
@@ -187,6 +192,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
Resources: v1.ResourceRequirements{
Limits: limit,
},
Env: v.cluster.Spec.AgentEnvs,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -228,5 +234,12 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
}

// specify resource limits if specified for the servers.
if v.cluster.Spec.WorkerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: v.cluster.Spec.WorkerLimit,
}
}

return podSpec
}

@@ -25,7 +25,7 @@ func Test_virtualAgentData(t *testing.T) {
token: "dnjklsdjnksd892389238",
},
expectedData: map[string]string{
"server": "https://10.0.0.21:6443",
"server": "https://10.0.0.21",
"token": "dnjklsdjnksd892389238",
"with-node-id": "true",
},

@@ -56,10 +56,12 @@ type ClusterReconciler struct {
Scheme *runtime.Scheme
SharedAgentImage string
SharedAgentImagePullPolicy string
K3SImage string
K3SImagePullPolicy string
}

// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string) error {
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, k3SImage string, k3SImagePullPolicy string) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
@@ -76,6 +78,8 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
K3SImage: k3SImage,
K3SImagePullPolicy: k3SImagePullPolicy,
}

return ctrl.NewControllerManagedBy(mgr).
@@ -84,6 +88,7 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
Complete(&reconciler)
}

@@ -123,6 +128,11 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}

return reconcile.Result{}, reconcilerErr
}

@@ -165,7 +175,7 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}

s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode))
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode), c.K3SImage, c.K3SImagePullPolicy)

cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
@@ -520,7 +530,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C

var agentEnsurer agent.ResourceEnsurer
if cluster.Spec.Mode == agent.VirtualNodeMode {
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token)
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
} else {
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
}

@@ -60,7 +60,7 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())

ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "")
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "")
Expect(err).NotTo(HaveOccurred())

go func() {

@@ -92,13 +92,37 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(spec.Ingress).To(Equal([]networkingv1.NetworkPolicyIngressRule{{}}))
})

When("exposing the cluster with nodePort and custom ports", func() {
It("will have a NodePort service with the specified port exposed", func() {
When("exposing the cluster with nodePort", func() {
It("will have a NodePort service", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
}

err := k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

var service v1.Service

Eventually(func() v1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(v1.ServiceTypeNodePort))
})

It("will have the specified ports exposed when specified", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ServicePort: ptr.To[int32](30011),
ETCDPort: ptr.To[int32](30012),
ServerPort: ptr.To[int32](30010),
ETCDPort: ptr.To[int32](30011),
},
}

@@ -123,29 +147,95 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(3))
Expect(servicePorts).To(HaveLen(2))

Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-server-port"),
HaveField("Port", BeEquivalentTo(6443)),
HaveField("NodePort", BeEquivalentTo(30010)),
),
))
Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-service-port"),
HaveField("Port", BeEquivalentTo(443)),
HaveField("NodePort", BeEquivalentTo(30011)),
),
))
Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-etcd-port"),
HaveField("Port", BeEquivalentTo(2379)),
HaveField("NodePort", BeEquivalentTo(30012)),
),
))
serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.NodePort).To(BeEquivalentTo(30010))

etcdPort := servicePorts[1]
Expect(etcdPort.Name).To(Equal("k3s-etcd-port"))
Expect(etcdPort.Port).To(BeEquivalentTo(2379))
Expect(etcdPort.NodePort).To(BeEquivalentTo(30011))
})

It("will not expose the port when out of range", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
ETCDPort: ptr.To[int32](2222),
},
}

err := k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

var service v1.Service

Eventually(func() v1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(v1.ServiceTypeNodePort))

servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(1))

serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.TargetPort.IntValue()).To(BeEquivalentTo(6443))
})

})

When("exposing the cluster with loadbalancer", func() {
It("will have a LoadBalancer service with the default ports exposed", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
}

err := k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

var service v1.Service

Eventually(func() v1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(v1.ServiceTypeLoadBalancer))

servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(2))

serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.TargetPort.IntValue()).To(BeEquivalentTo(6443))

etcdPort := servicePorts[1]
Expect(etcdPort.Name).To(Equal("k3s-etcd-port"))
Expect(etcdPort.Port).To(BeEquivalentTo(2379))
Expect(etcdPort.TargetPort.IntValue()).To(BeEquivalentTo(2379))
})
})
})

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"net/http"
"syscall"
"time"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
@@ -17,6 +18,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

var ErrServerNotReady = errors.New("server not ready")

type ControlRuntimeBootstrap struct {
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"serverCAKey"`
@@ -48,7 +51,7 @@ func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, t
}

func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
url := "https://" + serverIP + "/v1-k3s/server-bootstrap"

client := http.Client{
Transport: &http.Transport{
@@ -68,6 +71,10 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)

resp, err := client.Do(req)
if err != nil {
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, ErrServerNotReady
}

return nil, err
}
defer resp.Body.Close()

@@ -39,7 +39,7 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
}

func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster, token)
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
}

func initConfigData(cluster *v1alpha1.Cluster, token string) string {

@@ -11,9 +11,9 @@ import (
)

const (
servicePort = 443
serverPort = 6443
etcdPort = 2379
httpsPort = 443
k3sServerPort = 6443
etcdPort = 2379
)

func IngressName(clusterName string) string {
@@ -64,7 +64,7 @@ func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
Service: &networkingv1.IngressServiceBackend{
Name: ServiceName(cluster.Name),
Port: networkingv1.ServiceBackendPort{
Number: serverPort,
Number: httpsPort,
},
},
},
@@ -24,33 +24,30 @@ const (
|
||||
serverName = "server"
|
||||
configName = "server-config"
|
||||
initConfigName = "init-server-config"
|
||||
|
||||
ServerPort = 6443
|
||||
)
|
||||
|
||||
// Server
|
||||
type Server struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
client client.Client
|
||||
mode string
|
||||
token string
|
||||
cluster *v1alpha1.Cluster
|
||||
client client.Client
|
||||
mode string
|
||||
token string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
}
|
||||
|
||||
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *Server {
|
||||
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string, k3SImage string, k3SImagePullPolicy string) *Server {
|
||||
return &Server{
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
token: token,
|
||||
mode: mode,
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
token: token,
|
||||
mode: mode,
|
||||
k3SImage: k3SImage,
|
||||
k3SImagePullPolicy: k3SImagePullPolicy,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
    var limit v1.ResourceList
    if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
        limit = s.cluster.Spec.Limit.ServerLimit
    }

    podSpec := v1.PodSpec{
        NodeSelector:      s.cluster.Spec.NodeSelector,
        PriorityClassName: s.cluster.Spec.PriorityClass,
@@ -116,11 +113,9 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
        },
        Containers: []v1.Container{
            {
                Name:  name,
                Image: image,
                Resources: v1.ResourceRequirements{
                    Limits: limit,
                },
                Name:            name,
                Image:           image,
                ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
                Env: []v1.EnvVar{
                    {
                        Name: "POD_NAME",
@@ -213,6 +208,20 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
            },
        },
    }
    podSpec.Containers[0].LivenessProbe = &v1.Probe{
        InitialDelaySeconds: 10,
        FailureThreshold:    3,
        PeriodSeconds:       3,
        ProbeHandler: v1.ProbeHandler{
            Exec: &v1.ExecAction{
                Command: []string{
                    "sh",
                    "-c",
                    `grep -q "rejoin the cluster" /var/log/k3s.log && exit 1 || exit 0`,
                },
            },
        },
    }
    // start the pod unprivileged in shared mode
    if s.mode == agent.VirtualNodeMode {
        podSpec.Containers[0].SecurityContext = &v1.SecurityContext{
@@ -220,6 +229,15 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
        }
    }

    // specify resource limits if specified for the servers.
    if s.cluster.Spec.ServerLimit != nil {
        podSpec.Containers[0].Resources = v1.ResourceRequirements{
            Limits: s.cluster.Spec.ServerLimit,
        }
    }

    podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, s.cluster.Spec.ServerEnvs...)

    return podSpec
}
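The limits wiring moved out of the container literal: podSpec now reads cluster.Spec.ServerLimit directly and only sets Resources when it is non-nil. A minimal sketch of capping the server container (quantities are illustrative; resource is k8s.io/apimachinery/pkg/api/resource):

cluster.Spec.ServerLimit = v1.ResourceList{
    v1.ResourceCPU:    resource.MustParse("500m"),
    v1.ResourceMemory: resource.MustParse("1Gi"),
}
// podSpec() copies this into Containers[0].Resources.Limits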
@@ -231,7 +249,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
        persistent bool
    )

    image := controller.K3SImage(s.cluster)
    image := controller.K3SImage(s.cluster, s.k3SImage)
    name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)

    replicas = *s.cluster.Spec.Servers
@@ -19,7 +19,6 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
            Namespace: cluster.Namespace,
        },
        Spec: v1.ServiceSpec{
            Type: v1.ServiceTypeClusterIP,
            Selector: map[string]string{
                "cluster": cluster.Name,
                "role":    "server",
@@ -28,16 +27,10 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
    }

    k3sServerPort := v1.ServicePort{
        Name:     "k3s-server-port",
        Protocol: v1.ProtocolTCP,
        Port:     serverPort,
    }

    k3sServicePort := v1.ServicePort{
        Name:       "k3s-service-port",
    k3sServerPort := v1.ServicePort{
        Name:       "k3s-server-port",
        Protocol:   v1.ProtocolTCP,
        Port:       servicePort,
        TargetPort: intstr.FromInt(serverPort),
        Port:       httpsPort,
        TargetPort: intstr.FromInt(k3sServerPort),
    }

    etcdPort := v1.ServicePort{
@@ -46,35 +39,90 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
        Port: etcdPort,
    }

    // If no expose is specified, default to ClusterIP
    if cluster.Spec.Expose == nil {
        service.Spec.Type = v1.ServiceTypeClusterIP
        service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
    }

    // If expose is specified, set the type to the appropriate type
    if cluster.Spec.Expose != nil {
        nodePortConfig := cluster.Spec.Expose.NodePort
        if nodePortConfig != nil {
        expose := cluster.Spec.Expose

        // ingress
        if expose.Ingress != nil {
            service.Spec.Type = v1.ServiceTypeClusterIP
            service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
        }

        // loadbalancer
        if expose.LoadBalancer != nil {
            service.Spec.Type = v1.ServiceTypeLoadBalancer
            addLoadBalancerPorts(service, *expose.LoadBalancer, k3sServerPort, etcdPort)
        }

        // nodeport
        if expose.NodePort != nil {
            service.Spec.Type = v1.ServiceTypeNodePort

            if nodePortConfig.ServerPort != nil {
                k3sServerPort.NodePort = *nodePortConfig.ServerPort
            }

            if nodePortConfig.ServicePort != nil {
                k3sServicePort.NodePort = *nodePortConfig.ServicePort
            }

            if nodePortConfig.ETCDPort != nil {
                etcdPort.NodePort = *nodePortConfig.ETCDPort
            }
            addNodePortPorts(service, *expose.NodePort, k3sServerPort, etcdPort)
        }
    }

    service.Spec.Ports = append(
        service.Spec.Ports,
        k3sServicePort,
        etcdPort,
        k3sServerPort,
    )

    return service
}

// addLoadBalancerPorts adds the load balancer ports to the service
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
    // If the server port is not specified, use the default port
    if loadbalancerConfig.ServerPort == nil {
        service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
    } else if *loadbalancerConfig.ServerPort > 0 {
        // If the server port is specified, set the port, otherwise the service will not be exposed
        k3sServerPort.Port = *loadbalancerConfig.ServerPort
        service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
    }

    // If the etcd port is not specified, use the default port
    if loadbalancerConfig.ETCDPort == nil {
        service.Spec.Ports = append(service.Spec.Ports, etcdPort)
    } else if *loadbalancerConfig.ETCDPort > 0 {
        // If the etcd port is specified, set the port, otherwise the service will not be exposed
        etcdPort.Port = *loadbalancerConfig.ETCDPort
        service.Spec.Ports = append(service.Spec.Ports, etcdPort)
    }
}

// addNodePortPorts adds the node port ports to the service
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
    // If the server port is not specified Kubernetes will set the node port to a random port between 30000-32767
    if nodePortConfig.ServerPort == nil {
        service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
    } else {
        serverNodePort := *nodePortConfig.ServerPort

        // If the server port is in the range of 30000-32767, set the node port
        // otherwise the service will not be exposed
        if serverNodePort >= 30000 && serverNodePort <= 32767 {
            k3sServerPort.NodePort = serverNodePort
            service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
        }
    }

    // If the etcd port is not specified Kubernetes will set the node port to a random port between 30000-32767
    if nodePortConfig.ETCDPort == nil {
        service.Spec.Ports = append(service.Spec.Ports, etcdPort)
    } else {
        etcdNodePort := *nodePortConfig.ETCDPort

        // If the etcd port is in the range of 30000-32767, set the node port
        // otherwise the service will not be exposed
        if etcdNodePort >= 30000 && etcdNodePort <= 32767 {
            etcdPort.NodePort = etcdNodePort
            service.Spec.Ports = append(service.Spec.Ports, etcdPort)
        }
    }
}

func (s *Server) StatefulServerService() *v1.Service {
    return &v1.Service{
        TypeMeta: metav1.TypeMeta{
@@ -94,15 +142,10 @@ func (s *Server) StatefulServerService() *v1.Service {
            },
            Ports: []v1.ServicePort{
                {
                    Name:     "k3s-server-port",
                    Protocol: v1.ProtocolTCP,
                    Port:     serverPort,
                },
                {
                    Name:       "k3s-service-port",
                    Name:       "k3s-server-port",
                    Protocol:   v1.ProtocolTCP,
                    Port:       servicePort,
                    TargetPort: intstr.FromInt(serverPort),
                    Port:       httpsPort,
                    TargetPort: intstr.FromInt(k3sServerPort),
                },
                {
                    Name: "k3s-etcd-port",
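Which branch runs is decided purely by which sub-config of Expose is set. A sketch of a spec that pins the server NodePort into the valid range (the Expose wrapper type name, ExposeConfig here, is an assumption; NodePortConfig and ServerPort are taken from the diff):

cluster.Spec.Expose = &v1alpha1.ExposeConfig{ // wrapper type name assumed
    NodePort: &v1alpha1.NodePortConfig{
        // inside 30000-32767, so addNodePortPorts keeps the port;
        // an out-of-range value silently drops it from the Service
        ServerPort: ptr.To[int32](30443),
    },
}
svc := Service(cluster) // svc.Spec.Type == v1.ServiceTypeNodePort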
@@ -3,14 +3,14 @@ package server
var singleServerTemplate string = `
if [ -d "{{.ETCD_DIR}}" ]; then
    # if directory exists then it means its not an initial run
    /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
    /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi
rm -f /var/lib/rancher/k3s/server/db/reset-flag
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}`
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log`

var HAServerTemplate string = `
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
    /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
    /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
else
    /bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}}
    /bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi`
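Every k3s invocation in these templates now tees its output to /var/log/k3s.log, which is exactly the file the new liveness probe greps for the "rejoin the cluster" message. The placeholders are standard Go template fields; a sketch of rendering the single-server script (the values are placeholders, and rendering via text/template with a string map is an assumption; it needs the os and text/template imports):

tmpl := template.Must(template.New("startup").Parse(singleServerTemplate))

_ = tmpl.Execute(os.Stdout, map[string]string{
    "ETCD_DIR":    "/var/lib/rancher/k3s/server/db/etcd",
    "INIT_CONFIG": "/opt/rancher/k3s/init-config.yaml",
    "EXTRA_ARGS":  "--debug",
})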
@@ -1,342 +0,0 @@
package clusterset

import (
    "context"
    "errors"
    "reflect"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    k3kcontroller "github.com/rancher/k3k/pkg/controller"
    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/builder"
    "sigs.k8s.io/controller-runtime/pkg/client"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
    clusterSetController    = "k3k-clusterset-controller"
    allTrafficCIDR          = "0.0.0.0/0"
    maxConcurrentReconciles = 1
)

type ClusterSetReconciler struct {
    Client      ctrlruntimeclient.Client
    Scheme      *runtime.Scheme
    ClusterCIDR string
}

// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
    // initialize a new Reconciler
    reconciler := ClusterSetReconciler{
        Client:      mgr.GetClient(),
        Scheme:      mgr.GetScheme(),
        ClusterCIDR: clusterCIDR,
    }

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.ClusterSet{}).
        Owns(&networkingv1.NetworkPolicy{}).
        WithOptions(controller.Options{
            MaxConcurrentReconciles: maxConcurrentReconciles,
        }).
        Watches(
            &v1.Namespace{},
            handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
            builder.WithPredicates(namespaceLabelsPredicate()),
        ).
        Watches(
            &v1alpha1.Cluster{},
            handler.EnqueueRequestsFromMapFunc(sameNamespaceEventHandler(reconciler)),
        ).
        Complete(&reconciler)
}

// namespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
    return func(ctx context.Context, obj client.Object) []reconcile.Request {
        var (
            requests []reconcile.Request
            set      v1alpha1.ClusterSetList
        )

        _ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetName()))

        for _, clusterSet := range set.Items {
            requests = append(requests, reconcile.Request{
                NamespacedName: types.NamespacedName{
                    Name:      clusterSet.Name,
                    Namespace: obj.GetName(),
                },
            })
        }

        return requests
    }
}

// sameNamespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
func sameNamespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
    return func(ctx context.Context, obj client.Object) []reconcile.Request {
        var (
            requests []reconcile.Request
            set      v1alpha1.ClusterSetList
        )

        _ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetNamespace()))

        for _, clusterSet := range set.Items {
            requests = append(requests, reconcile.Request{
                NamespacedName: types.NamespacedName{
                    Name:      clusterSet.Name,
                    Namespace: obj.GetNamespace(),
                },
            })
        }

        return requests
    }
}

// namespaceLabelsPredicate returns a predicate that will allow a reconciliation if the labels of a Namespace changed
func namespaceLabelsPredicate() predicate.Predicate {
    return predicate.Funcs{
        UpdateFunc: func(e event.UpdateEvent) bool {
            oldObj := e.ObjectOld.(*v1.Namespace)
            newObj := e.ObjectNew.(*v1.Namespace)

            return !reflect.DeepEqual(oldObj.Labels, newObj.Labels)
        },
    }
}

func (c *ClusterSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
    log := ctrl.LoggerFrom(ctx).WithValues("clusterset", req.NamespacedName)
    ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger

    var clusterSet v1alpha1.ClusterSet
    if err := c.Client.Get(ctx, req.NamespacedName, &clusterSet); err != nil {
        return reconcile.Result{}, client.IgnoreNotFound(err)
    }

    if err := c.reconcileNetworkPolicy(ctx, &clusterSet); err != nil {
        return reconcile.Result{}, err
    }

    if err := c.reconcileNamespacePodSecurityLabels(ctx, &clusterSet); err != nil {
        return reconcile.Result{}, err
    }

    if err := c.reconcileClusters(ctx, &clusterSet); err != nil {
        return reconcile.Result{}, err
    }

    // TODO: Add resource quota for clustersets
    // if clusterSet.Spec.MaxLimits != nil {
    //     quota := v1.ResourceQuota{
    //         ObjectMeta: metav1.ObjectMeta{
    //             Name:      "clusterset-quota",
    //             Namespace: clusterSet.Namespace,
    //             OwnerReferences: []metav1.OwnerReference{
    //                 {
    //                     UID:        clusterSet.UID,
    //                     Name:       clusterSet.Name,
    //                     APIVersion: clusterSet.APIVersion,
    //                     Kind:       clusterSet.Kind,
    //                 },
    //             },
    //         },
    //     }
    //     quota.Spec.Hard = clusterSet.Spec.MaxLimits
    //     if err := c.Client.Create(ctx, &quota); err != nil {
    //         return reconcile.Result{}, fmt.Errorf("unable to create resource quota from cluster set: %w", err)
    //     }
    // }
    return reconcile.Result{}, nil
}

func (c *ClusterSetReconciler) reconcileNetworkPolicy(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
    log := ctrl.LoggerFrom(ctx)
    log.Info("reconciling NetworkPolicy")

    networkPolicy, err := netpol(ctx, c.ClusterCIDR, clusterSet, c.Client)
    if err != nil {
        return err
    }

    if err = ctrl.SetControllerReference(clusterSet, networkPolicy, c.Scheme); err != nil {
        return err
    }

    // if disabled then delete the existing network policy
    if clusterSet.Spec.DisableNetworkPolicy {
        err := c.Client.Delete(ctx, networkPolicy)
        return client.IgnoreNotFound(err)
    }

    // otherwise try to create/update
    err = c.Client.Create(ctx, networkPolicy)
    if apierrors.IsAlreadyExists(err) {
        return c.Client.Update(ctx, networkPolicy)
    }

    return err
}

func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.ClusterSet, client client.Client) (*networkingv1.NetworkPolicy, error) {
    var cidrList []string

    if clusterCIDR != "" {
        cidrList = []string{clusterCIDR}
    } else {
        var nodeList v1.NodeList
        if err := client.List(ctx, &nodeList); err != nil {
            return nil, err
        }

        for _, node := range nodeList.Items {
            cidrList = append(cidrList, node.Spec.PodCIDRs...)
        }
    }

    return &networkingv1.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{
            Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
            Namespace: clusterSet.Namespace,
        },
        TypeMeta: metav1.TypeMeta{
            Kind:       "NetworkPolicy",
            APIVersion: "networking.k8s.io/v1",
        },
        Spec: networkingv1.NetworkPolicySpec{
            PolicyTypes: []networkingv1.PolicyType{
                networkingv1.PolicyTypeIngress,
                networkingv1.PolicyTypeEgress,
            },
            Ingress: []networkingv1.NetworkPolicyIngressRule{
                {},
            },
            Egress: []networkingv1.NetworkPolicyEgressRule{
                {
                    To: []networkingv1.NetworkPolicyPeer{
                        {
                            IPBlock: &networkingv1.IPBlock{
                                CIDR:   allTrafficCIDR,
                                Except: cidrList,
                            },
                        },
                        {
                            NamespaceSelector: &metav1.LabelSelector{
                                MatchLabels: map[string]string{
                                    "kubernetes.io/metadata.name": clusterSet.Namespace,
                                },
                            },
                        },
                        {
                            NamespaceSelector: &metav1.LabelSelector{
                                MatchLabels: map[string]string{
                                    "kubernetes.io/metadata.name": metav1.NamespaceSystem,
                                },
                            },
                            PodSelector: &metav1.LabelSelector{
                                MatchLabels: map[string]string{
                                    "k8s-app": "kube-dns",
                                },
                            },
                        },
                    },
                },
            },
        },
    }, nil
}

func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
    log := ctrl.LoggerFrom(ctx)
    log.Info("reconciling Namespace")

    var ns v1.Namespace

    key := types.NamespacedName{Name: clusterSet.Namespace}
    if err := c.Client.Get(ctx, key, &ns); err != nil {
        return err
    }

    newLabels := map[string]string{}
    for k, v := range ns.Labels {
        newLabels[k] = v
    }

    // cleanup of old labels
    delete(newLabels, "pod-security.kubernetes.io/enforce")
    delete(newLabels, "pod-security.kubernetes.io/enforce-version")
    delete(newLabels, "pod-security.kubernetes.io/warn")
    delete(newLabels, "pod-security.kubernetes.io/warn-version")

    // if a PSA level is specified add the proper labels
    if clusterSet.Spec.PodSecurityAdmissionLevel != nil {
        psaLevel := *clusterSet.Spec.PodSecurityAdmissionLevel

        newLabels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
        newLabels["pod-security.kubernetes.io/enforce-version"] = "latest"

        // skip the 'warn' only for the privileged PSA level
        if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
            newLabels["pod-security.kubernetes.io/warn"] = string(psaLevel)
            newLabels["pod-security.kubernetes.io/warn-version"] = "latest"
        }
    }

    if !reflect.DeepEqual(ns.Labels, newLabels) {
        log.V(1).Info("labels changed, updating namespace")

        ns.Labels = newLabels

        return c.Client.Update(ctx, &ns)
    }

    return nil
}

func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
    log := ctrl.LoggerFrom(ctx)
    log.Info("reconciling Clusters")

    var clusters v1alpha1.ClusterList
    if err := c.Client.List(ctx, &clusters, ctrlruntimeclient.InNamespace(clusterSet.Namespace)); err != nil {
        return err
    }

    var err error

    for _, cluster := range clusters.Items {
        oldClusterSpec := cluster.Spec

        if cluster.Spec.PriorityClass != clusterSet.Spec.DefaultPriorityClass {
            cluster.Spec.PriorityClass = clusterSet.Spec.DefaultPriorityClass
        }

        if !reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector) {
            cluster.Spec.NodeSelector = clusterSet.Spec.DefaultNodeSelector
        }

        if !reflect.DeepEqual(oldClusterSpec, cluster.Spec) {
            // continue updating also the other clusters even if an error occurred
            err = errors.Join(c.Client.Update(ctx, &cluster))
        }
    }

    return err
}
@@ -1,670 +0,0 @@
package clusterset_test

import (
    "context"
    "reflect"
    "time"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

    k3kcontroller "github.com/rancher/k3k/pkg/controller"
    corev1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/ptr"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("ClusterSet Controller", Label("controller"), Label("ClusterSet"), func() {

    Context("creating a ClusterSet", func() {

        var (
            namespace string
        )

        BeforeEach(func() {
            createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
            err := k8sClient.Create(context.Background(), createdNS)
            Expect(err).To(Not(HaveOccurred()))
            namespace = createdNS.Name
        })

        When("created with a default spec", func() {
            It("should have only the 'shared' allowedNodeTypes", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
                Expect(allowedModeTypes).To(HaveLen(1))
                Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
            })

            It("should create a NetworkPolicy", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // look for network policies etc
                clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}

                Eventually(func() error {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
                }).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeNil())

                spec := clusterSetNetworkPolicy.Spec
                Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
                Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))

                // ingress should allow everything
                Expect(spec.Ingress).To(ConsistOf(networkingv1.NetworkPolicyIngressRule{}))

                // egress should contains some rules
                Expect(spec.Egress).To(HaveLen(1))

                // allow networking to all external IPs
                ipBlockRule := networkingv1.NetworkPolicyPeer{
                    IPBlock: &networkingv1.IPBlock{CIDR: "0.0.0.0/0"},
                }

                // allow networking in the same namespace
                clusterSetNamespaceRule := networkingv1.NetworkPolicyPeer{
                    NamespaceSelector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace},
                    },
                }

                // allow networking to the "kube-dns" pod in the "kube-system" namespace
                kubeDNSRule := networkingv1.NetworkPolicyPeer{
                    PodSelector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{"k8s-app": "kube-dns"},
                    },
                    NamespaceSelector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"},
                    },
                }

                Expect(spec.Egress[0].To).To(ContainElements(
                    ipBlockRule, clusterSetNamespaceRule, kubeDNSRule,
                ))
            })
        })

        When("created with DisableNetworkPolicy", func() {
            It("should not create a NetworkPolicy if true", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        DisableNetworkPolicy: true,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // wait for a bit for the network policy, but it should not be created
                Eventually(func() bool {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    err := k8sClient.Get(ctx, key, &networkingv1.NetworkPolicy{})
                    return apierrors.IsNotFound(err)
                }).
                    MustPassRepeatedly(5).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })

            It("should delete the NetworkPolicy if changed to false", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // look for network policy
                clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}

                Eventually(func() error {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
                }).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeNil())

                clusterSet.Spec.DisableNetworkPolicy = true
                err = k8sClient.Update(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // wait for a bit for the network policy to being deleted
                Eventually(func() bool {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    err := k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
                    return apierrors.IsNotFound(err)
                }).
                    MustPassRepeatedly(5).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })

            It("should recreate the NetworkPolicy if deleted", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // look for network policy
                clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}

                Eventually(func() error {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    return k8sClient.Get(context.Background(), key, clusterSetNetworkPolicy)
                }).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeNil())

                err = k8sClient.Delete(ctx, clusterSetNetworkPolicy)
                Expect(err).To(Not(HaveOccurred()))

                key := types.NamespacedName{
                    Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                    Namespace: namespace,
                }
                err = k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
                Expect(apierrors.IsNotFound(err)).Should(BeTrue())

                // wait a bit for the network policy to being recreated
                Eventually(func() error {
                    key := types.NamespacedName{
                        Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
                        Namespace: namespace,
                    }
                    return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeNil())
            })

        })

        When("created specifying the mode", func() {
            It("should have the 'virtual' mode if specified", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        AllowedNodeTypes: []v1alpha1.ClusterMode{
                            v1alpha1.VirtualClusterMode,
                        },
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
                Expect(allowedModeTypes).To(HaveLen(1))
                Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
            })

            It("should have both modes if specified", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        AllowedNodeTypes: []v1alpha1.ClusterMode{
                            v1alpha1.SharedClusterMode,
                            v1alpha1.VirtualClusterMode,
                        },
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
                Expect(allowedModeTypes).To(HaveLen(2))
                Expect(allowedModeTypes).To(ContainElements(
                    v1alpha1.SharedClusterMode,
                    v1alpha1.VirtualClusterMode,
                ))
            })

            It("should fail for a non-existing mode", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        AllowedNodeTypes: []v1alpha1.ClusterMode{
                            v1alpha1.SharedClusterMode,
                            v1alpha1.VirtualClusterMode,
                            v1alpha1.ClusterMode("non-existing"),
                        },
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(HaveOccurred())
            })
        })

        When("created specifying the podSecurityAdmissionLevel", func() {
            It("should add and update the proper pod-security labels to the namespace", func() {
                var (
                    privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
                    baseline   = v1alpha1.BaselinePodSecurityAdmissionLevel
                    restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
                )

                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        PodSecurityAdmissionLevel: &privileged,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                var ns corev1.Namespace

                // Check privileged

                // wait a bit for the namespace to be updated
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return enforceValue == "privileged"
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))

                // Check baseline

                clusterSet.Spec.PodSecurityAdmissionLevel = &baseline
                err = k8sClient.Update(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit for the namespace to be updated
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return enforceValue == "baseline"
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "baseline"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "baseline"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))

                // Check restricted

                clusterSet.Spec.PodSecurityAdmissionLevel = &restricted
                err = k8sClient.Update(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit for the namespace to be updated
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return enforceValue == "restricted"
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "restricted"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))

                // check cleanup

                clusterSet.Spec.PodSecurityAdmissionLevel = nil
                err = k8sClient.Update(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit for the namespace to be updated
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    _, found := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return found
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeFalse())

                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce")))
                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce-version")))
                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
                Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))
            })

            It("should restore the labels if Namespace is updated", func() {
                privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel

                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        PodSecurityAdmissionLevel: &privileged,
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                var ns corev1.Namespace

                // wait a bit for the namespace to be updated
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return enforceValue == "privileged"
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))

                ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
                err = k8sClient.Update(ctx, &ns)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit for the namespace to be restored
                Eventually(func() bool {
                    err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
                    Expect(err).To(Not(HaveOccurred()))
                    enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
                    return enforceValue == "privileged"
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
                Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
            })
        })

        When("a cluster in the same namespace is present", func() {
            It("should update it if needed", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        DefaultPriorityClass: "foobar",
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                cluster := &v1alpha1.Cluster{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "cluster-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSpec{
                        Mode:    v1alpha1.SharedClusterMode,
                        Servers: ptr.To[int32](1),
                        Agents:  ptr.To[int32](0),
                    },
                }

                err = k8sClient.Create(ctx, cluster)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit
                Eventually(func() bool {
                    key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
                    err = k8sClient.Get(ctx, key, cluster)
                    Expect(err).To(Not(HaveOccurred()))
                    return cluster.Spec.PriorityClass == clusterSet.Spec.DefaultPriorityClass
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })

            It("should update the nodeSelector", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        DefaultNodeSelector: map[string]string{"label-1": "value-1"},
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                cluster := &v1alpha1.Cluster{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "cluster-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSpec{
                        Mode:    v1alpha1.SharedClusterMode,
                        Servers: ptr.To[int32](1),
                        Agents:  ptr.To[int32](0),
                    },
                }

                err = k8sClient.Create(ctx, cluster)
                Expect(err).To(Not(HaveOccurred()))

                // wait a bit
                Eventually(func() bool {
                    key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
                    err = k8sClient.Get(ctx, key, cluster)
                    Expect(err).To(Not(HaveOccurred()))
                    return reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })

            It("should update the nodeSelector if changed", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        DefaultNodeSelector: map[string]string{"label-1": "value-1"},
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                cluster := &v1alpha1.Cluster{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "cluster-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSpec{
                        Mode:         v1alpha1.SharedClusterMode,
                        Servers:      ptr.To[int32](1),
                        Agents:       ptr.To[int32](0),
                        NodeSelector: map[string]string{"label-1": "value-1"},
                    },
                }

                err = k8sClient.Create(ctx, cluster)
                Expect(err).To(Not(HaveOccurred()))

                Expect(cluster.Spec.NodeSelector).To(Equal(clusterSet.Spec.DefaultNodeSelector))

                // update the ClusterSet
                clusterSet.Spec.DefaultNodeSelector["label-2"] = "value-2"
                err = k8sClient.Update(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))
                Expect(cluster.Spec.NodeSelector).To(Not(Equal(clusterSet.Spec.DefaultNodeSelector)))

                // wait a bit
                Eventually(func() bool {
                    key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
                    err = k8sClient.Get(ctx, key, cluster)
                    Expect(err).To(Not(HaveOccurred()))
                    return reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())

                // Update the Cluster
                cluster.Spec.NodeSelector["label-3"] = "value-3"
                err = k8sClient.Update(ctx, cluster)
                Expect(err).To(Not(HaveOccurred()))
                Expect(cluster.Spec.NodeSelector).To(Not(Equal(clusterSet.Spec.DefaultNodeSelector)))

                // wait a bit and check it's restored
                Eventually(func() bool {
                    var updatedCluster v1alpha1.Cluster

                    key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
                    err = k8sClient.Get(ctx, key, &updatedCluster)
                    Expect(err).To(Not(HaveOccurred()))
                    return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
                }).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })
        })

        When("a cluster in a different namespace is present", func() {
            It("should not be update", func() {
                clusterSet := &v1alpha1.ClusterSet{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "clusterset-",
                        Namespace:    namespace,
                    },
                    Spec: v1alpha1.ClusterSetSpec{
                        DefaultPriorityClass: "foobar",
                    },
                }

                err := k8sClient.Create(ctx, clusterSet)
                Expect(err).To(Not(HaveOccurred()))

                namespace2 := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
                err = k8sClient.Create(ctx, namespace2)
                Expect(err).To(Not(HaveOccurred()))

                cluster := &v1alpha1.Cluster{
                    ObjectMeta: v1.ObjectMeta{
                        GenerateName: "cluster-",
                        Namespace:    namespace2.Name,
                    },
                    Spec: v1alpha1.ClusterSpec{
                        Mode:    v1alpha1.SharedClusterMode,
                        Servers: ptr.To[int32](1),
                        Agents:  ptr.To[int32](0),
                    },
                }

                err = k8sClient.Create(ctx, cluster)
                Expect(err).To(Not(HaveOccurred()))

                // it should not change!
                Eventually(func() bool {
                    key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
                    err = k8sClient.Get(ctx, key, cluster)
                    Expect(err).To(Not(HaveOccurred()))
                    return cluster.Spec.PriorityClass != clusterSet.Spec.DefaultPriorityClass
                }).
                    MustPassRepeatedly(5).
                    WithTimeout(time.Second * 10).
                    WithPolling(time.Second).
                    Should(BeTrue())
            })
        })
    })
})
@@ -1,95 +0,0 @@
package clusterset

import (
    "context"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
    nodeController = "k3k-node-controller"
)

type NodeReconciler struct {
    Client      ctrlruntimeclient.Client
    Scheme      *runtime.Scheme
    ClusterCIDR string
}

// AddNodeController adds a new controller to the manager
func AddNodeController(ctx context.Context, mgr manager.Manager) error {
    // initialize a new Reconciler
    reconciler := NodeReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1.Node{}).
        WithOptions(controller.Options{
            MaxConcurrentReconciles: maxConcurrentReconciles,
        }).
        Named(nodeController).
        Complete(&reconciler)
}

func (n *NodeReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
    log := ctrl.LoggerFrom(ctx).WithValues("node", req.NamespacedName)
    ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger

    log.Info("reconciling node")

    var clusterSetList v1alpha1.ClusterSetList
    if err := n.Client.List(ctx, &clusterSetList); err != nil {
        return reconcile.Result{}, err
    }

    if len(clusterSetList.Items) <= 0 {
        return reconcile.Result{}, nil
    }

    if err := n.ensureNetworkPolicies(ctx, clusterSetList); err != nil {
        return reconcile.Result{}, err
    }

    return reconcile.Result{}, nil
}

func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetList v1alpha1.ClusterSetList) error {
    log := ctrl.LoggerFrom(ctx)
    log.Info("ensuring network policies")

    var setNetworkPolicy *networkingv1.NetworkPolicy

    for _, cs := range clusterSetList.Items {
        if cs.Spec.DisableNetworkPolicy {
            continue
        }

        log = log.WithValues("clusterset", cs.Namespace+"/"+cs.Name)
        log.Info("updating NetworkPolicy for ClusterSet")

        var err error
        setNetworkPolicy, err = netpol(ctx, "", &cs, n.Client)

        if err != nil {
            return err
        }

        log.Info("new NetworkPolicy for clusterset")

        if err := n.Client.Update(ctx, setNetworkPolicy); err != nil {
            return err
        }
    }

    return nil
}
@@ -12,7 +12,6 @@ import (

const (
    namePrefix      = "k3k"
    k3SImageName    = "rancher/k3s"
    AdminCommonName = "system:admin"
)

@@ -27,16 +26,16 @@ var Backoff = wait.Backoff{
// K3SImage returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use with the same k8s version of the host cluster,
// stored in the Status object. It will return the untagged version as last fallback.
func K3SImage(cluster *v1alpha1.Cluster) string {
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
    if cluster.Spec.Version != "" {
        return k3SImageName + ":" + cluster.Spec.Version
        return k3SImage + ":" + cluster.Spec.Version
    }

    if cluster.Status.HostVersion != "" {
        return k3SImageName + ":" + cluster.Status.HostVersion
        return k3SImage + ":" + cluster.Status.HostVersion
    }

    return k3SImageName
    return k3SImage
}

// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
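The fallback order is unchanged; only the base image is now injectable instead of hardcoded. The resulting references, with illustrative versions:

// k3SImage = "rancher/k3s" in all three cases (values are illustrative):
// Spec.Version = "v1.31.1-k3s1"                   -> "rancher/k3s:v1.31.1-k3s1"
// Spec.Version empty, HostVersion = "v1.30.2-k3s1" -> "rancher/k3s:v1.30.2-k3s1"
// both empty                                       -> "rancher/k3s"
image := controller.K3SImage(cluster, "rancher/k3s")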
@@ -4,6 +4,7 @@ import (
    "context"
    "crypto/x509"
    "fmt"
    "slices"
    "time"

    certutil "github.com/rancher/dynamiclistener/cert"
@@ -12,6 +13,7 @@ import (
    "github.com/rancher/k3k/pkg/controller/certs"
    "github.com/rancher/k3k/pkg/controller/cluster/server"
    "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    "k8s.io/apimachinery/pkg/types"
@@ -101,15 +103,41 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
        return "", err
    }

    url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, server.ServerPort)
    ip := k3kService.Spec.ClusterIP
    port := int32(443)

    if k3kService.Spec.Type == v1.ServiceTypeNodePort {
        nodePort := k3kService.Spec.Ports[0].NodePort
        url = fmt.Sprintf("https://%s:%d", hostServerIP, nodePort)
    switch k3kService.Spec.Type {
    case v1.ServiceTypeNodePort:
        ip = hostServerIP
        port = k3kService.Spec.Ports[0].NodePort
    case v1.ServiceTypeLoadBalancer:
        ip = k3kService.Status.LoadBalancer.Ingress[0].IP
        port = k3kService.Spec.Ports[0].Port
    }

    expose := cluster.Spec.Expose
    if expose != nil && expose.Ingress != nil {
        if !slices.Contains(cluster.Status.TLSSANs, ip) {
            logrus.Warnf("ip %s not in tlsSANs", ip)

            if len(cluster.Spec.TLSSANs) > 0 {
                logrus.Warnf("Using the first TLS SAN in the spec as a fallback: %s", cluster.Spec.TLSSANs[0])

                ip = cluster.Spec.TLSSANs[0]
            } else if len(cluster.Status.TLSSANs) > 0 {
                logrus.Warnf("No explicit tlsSANs specified. Trying to use the first TLS SAN in the status: %s", cluster.Status.TLSSANs[0])

                ip = cluster.Status.TLSSANs[0]
            } else {
                logrus.Warn("ip not found in tlsSANs. This could cause issue with the certificate validation.")
            }
        }
    }

    url := "https://" + ip
    if port != 443 {
        url = fmt.Sprintf("%s:%d", url, port)
    }

    // if ingress is specified, use the ingress host
    if cluster.Spec.Expose != nil && cluster.Spec.Expose.Ingress != nil {
        var k3kIngress networkingv1.Ingress

        ingressKey := types.NamespacedName{
85 pkg/controller/policy/namespace.go (new file)
@@ -0,0 +1,85 @@
package policy

import (
    "context"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
    log := ctrl.LoggerFrom(ctx)
    log.Info("reconciling PSA labels")

    // cleanup of old labels
    delete(namespace.Labels, "pod-security.kubernetes.io/enforce")
    delete(namespace.Labels, "pod-security.kubernetes.io/enforce-version")
    delete(namespace.Labels, "pod-security.kubernetes.io/warn")
    delete(namespace.Labels, "pod-security.kubernetes.io/warn-version")

    // if a PSA level is specified add the proper labels
    if policy.Spec.PodSecurityAdmissionLevel != nil {
        psaLevel := *policy.Spec.PodSecurityAdmissionLevel

        namespace.Labels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
        namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"

        // skip the 'warn' only for the privileged PSA level
        if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
            namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
            namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
        }
    }
}

// cleanupNamespaces will cleanup the Namespaces without the "policy.k3k.io/policy-name" label
// deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
    log := ctrl.LoggerFrom(ctx)
    log.Info("deleting resources")

    var namespaces v1.NamespaceList
    if err := c.Client.List(ctx, &namespaces); err != nil {
        return err
    }

    for _, ns := range namespaces.Items {
        deleteOpts := []client.DeleteAllOfOption{
            client.InNamespace(ns.Name),
            client.MatchingLabels{ManagedByLabelKey: VirtualPolicyControllerName},
        }

        // if the namespace is bound to a policy -> cleanup resources of other policies
        if ns.Labels[PolicyNameLabelKey] != "" {
            requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})

            // log the error but continue cleaning up the other namespaces
            if err != nil {
                log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey])
            } else {
                sel := labels.NewSelector().Add(*requirement)
                deleteOpts = append(deleteOpts, client.MatchingLabelsSelector{Selector: sel})
            }
        }

        if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil {
            return err
        }

        if err := c.Client.DeleteAllOf(ctx, &v1.ResourceQuota{}, deleteOpts...); err != nil {
            return err
        }

        if err := c.Client.DeleteAllOf(ctx, &v1.LimitRange{}, deleteOpts...); err != nil {
            return err
        }
    }

    return nil
}
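The resulting label set follows directly from the branches above. A worked example for the baseline level (a sketch; the spec type name VirtualClusterPolicySpec is an assumption):

level := v1alpha1.BaselinePodSecurityAdmissionLevel
policy := &v1alpha1.VirtualClusterPolicy{
    Spec: v1alpha1.VirtualClusterPolicySpec{ // spec type name assumed
        PodSecurityAdmissionLevel: &level,
    },
}
// After reconciliation the namespace carries:
//   pod-security.kubernetes.io/enforce=baseline, enforce-version=latest
//   pod-security.kubernetes.io/warn=baseline,    warn-version=latest
// With the privileged level the two warn labels are omitted.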
115 pkg/controller/policy/networkpolicy.go (new file)
@@ -0,0 +1,115 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling NetworkPolicy")
|
||||
|
||||
var cidrList []string
|
||||
|
||||
if c.ClusterCIDR != "" {
|
||||
cidrList = []string{c.ClusterCIDR}
|
||||
} else {
|
||||
var nodeList v1.NodeList
|
||||
if err := c.Client.List(ctx, &nodeList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
if len(node.Spec.PodCIDRs) > 0 {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDRs...)
|
||||
} else {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDR)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
networkPolicy := networkPolicy(namespace, policy, cidrList)
|
||||
|
||||
if err := ctrl.SetControllerReference(policy, networkPolicy, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if disabled then delete the existing network policy
|
||||
if policy.Spec.DisableNetworkPolicy {
|
||||
err := c.Client.Delete(ctx, networkPolicy)
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// otherwise try to create/update
|
||||
err := c.Client.Create(ctx, networkPolicy)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, networkPolicy)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NetworkPolicy",
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespaceName,
|
||||
Labels: map[string]string{
|
||||
ManagedByLabelKey: VirtualPolicyControllerName,
|
||||
PolicyNameLabelKey: policy.Name,
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PolicyTypes: []networkingv1.PolicyType{
|
||||
networkingv1.PolicyTypeIngress,
|
||||
networkingv1.PolicyTypeEgress,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{
|
||||
{},
|
||||
},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
IPBlock: &networkingv1.IPBlock{
|
||||
CIDR: "0.0.0.0/0",
|
||||
Except: cidrList,
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": namespaceName,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
|
||||
},
|
||||
},
|
||||
PodSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"k8s-app": "kube-dns",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
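
The Except list is what gives this default policy its teeth: egress to 0.0.0.0/0 is broadly allowed, but the host cluster's pod CIDRs are carved out, leaving same-namespace traffic and kube-dns as the only in-cluster destinations. A minimal unit-test sketch of that invariant, assuming it lives in the same package (the test name, namespace, and CIDR below are illustrative, not part of the diff):

package policy

import (
	"testing"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Sketch: all egress is allowed via 0.0.0.0/0, with the host cluster's pod
// CIDRs carved out through the Except list.
func TestEgressCarvesOutPodCIDRs(t *testing.T) {
	vcp := &v1alpha1.VirtualClusterPolicy{ObjectMeta: metav1.ObjectMeta{Name: "vcp"}}

	np := networkPolicy("tenant-a", vcp, []string{"10.42.0.0/16"})

	ipBlock := np.Spec.Egress[0].To[0].IPBlock
	if ipBlock.CIDR != "0.0.0.0/0" || len(ipBlock.Except) != 1 || ipBlock.Except[0] != "10.42.0.0/16" {
		t.Fatalf("unexpected egress IPBlock: %+v", ipBlock)
	}
}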
460
pkg/controller/policy/policy.go
Normal file
@@ -0,0 +1,460 @@
package policy

import (
	"context"
	"errors"
	"reflect"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
	PolicyNameLabelKey          = "policy.k3k.io/policy-name"
	ManagedByLabelKey           = "app.kubernetes.io/managed-by"
	VirtualPolicyControllerName = "k3k-policy-controller"
)

type VirtualClusterPolicyReconciler struct {
	Client      client.Client
	Scheme      *runtime.Scheme
	ClusterCIDR string
}

// Add the controller to manage the Virtual Cluster policies
func Add(mgr manager.Manager, clusterCIDR string) error {
	reconciler := VirtualClusterPolicyReconciler{
		Client:      mgr.GetClient(),
		Scheme:      mgr.GetScheme(),
		ClusterCIDR: clusterCIDR,
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.VirtualClusterPolicy{}).
		Watches(&v1.Namespace{}, namespaceEventHandler()).
		Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
		Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
		Owns(&networkingv1.NetworkPolicy{}).
		Owns(&v1.ResourceQuota{}).
		Owns(&v1.LimitRange{}).
		Complete(&reconciler)
}

// namespaceEventHandler will enqueue a reconciliation of the VCP when a Namespace changes
func namespaceEventHandler() handler.Funcs {
	return handler.Funcs{
		// When a Namespace is created, if it has the "policy.k3k.io/policy-name" label
		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
			ns, ok := e.Object.(*v1.Namespace)
			if !ok {
				return
			}

			if ns.Labels[PolicyNameLabelKey] != "" {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
			}
		},
		// When a Namespace is updated, if it has the "policy.k3k.io/policy-name" label
		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
			oldNs, okOld := e.ObjectOld.(*v1.Namespace)
			newNs, okNew := e.ObjectNew.(*v1.Namespace)

			if !okOld || !okNew {
				return
			}

			oldVCPName := oldNs.Labels[PolicyNameLabelKey]
			newVCPName := newNs.Labels[PolicyNameLabelKey]

			// If labels haven't changed we can skip the reconciliation
			if reflect.DeepEqual(oldNs.Labels, newNs.Labels) {
				return
			}

			// If no VCP before and after we can skip the reconciliation
			if oldVCPName == "" && newVCPName == "" {
				return
			}

			// The VCP has not changed, but we enqueue a reconciliation because the PSA or other labels have changed
			if oldVCPName == newVCPName {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: oldVCPName}})
				return
			}

			// Enqueue the old VCP name for cleanup
			if oldVCPName != "" {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: oldVCPName}})
			}

			// Enqueue the new VCP name
			if newVCPName != "" {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: newVCPName}})
			}
		},
		// When a namespace is deleted all the resources in the namespace are deleted,
		// but we trigger the reconciliation to eventually perform some cluster-wide cleanup if necessary
		DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
			ns, ok := e.Object.(*v1.Namespace)
			if !ok {
				return
			}

			if ns.Labels[PolicyNameLabelKey] != "" {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
			}
		},
	}
}
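
The UpdateFunc above handles four distinct label transitions. A condensed sketch of that decision table (not part of the diff; policiesToEnqueue is a hypothetical helper name):

package policy

// policiesToEnqueue sketches which policies get enqueued for a
// policy-name label transition from oldName to newName.
func policiesToEnqueue(oldName, newName string) []string {
	switch {
	case oldName == "" && newName == "":
		return nil // no policy before or after: nothing to do
	case oldName == newName:
		return []string{oldName} // same policy; other labels (e.g. PSA) changed
	}

	var names []string
	if oldName != "" {
		names = append(names, oldName) // old policy must clean up
	}
	if newName != "" {
		names = append(names, newName) // new policy must apply
	}

	return names
}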

// nodeEventHandler will enqueue a reconciliation of all the VCPs when a Node changes.
// This happens only if the ClusterCIDR is NOT specified, to handle the PodCIDRs in the NetworkPolicies.
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
	// enqueue all the available VirtualClusterPolicies
	enqueueAllVCPs := func(ctx context.Context, q workqueue.RateLimitingInterface) {
		vcpList := &v1alpha1.VirtualClusterPolicyList{}
		if err := r.Client.List(ctx, vcpList); err != nil {
			return
		}

		for _, vcp := range vcpList.Items {
			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: vcp.Name}})
		}
	}

	return handler.Funcs{
		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
			if r.ClusterCIDR != "" {
				return
			}

			enqueueAllVCPs(ctx, q)
		},
		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
			if r.ClusterCIDR != "" {
				return
			}

			oldNode, okOld := e.ObjectOld.(*v1.Node)
			newNode, okNew := e.ObjectNew.(*v1.Node)

			if !okOld || !okNew {
				return
			}

			// Check if the PodCIDR or PodCIDRs fields have changed.
			var podCIDRChanged bool
			if oldNode.Spec.PodCIDR != newNode.Spec.PodCIDR {
				podCIDRChanged = true
			}
			if !reflect.DeepEqual(oldNode.Spec.PodCIDRs, newNode.Spec.PodCIDRs) {
				podCIDRChanged = true
			}

			if podCIDRChanged {
				enqueueAllVCPs(ctx, q)
			}
		},
		DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
			if r.ClusterCIDR != "" {
				return
			}

			enqueueAllVCPs(ctx, q)
		},
	}
}

// clusterEventHandler will enqueue a reconciliation of the VCP associated to the Namespace when a Cluster changes.
func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
	type clusterSubSpec struct {
		PriorityClass string
		NodeSelector  map[string]string
	}

	return handler.Funcs{
		// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
			cluster, ok := e.Object.(*v1alpha1.Cluster)
			if !ok {
				return
			}

			var ns v1.Namespace
			if err := r.Client.Get(ctx, types.NamespacedName{Name: cluster.Namespace}, &ns); err != nil {
				return
			}

			if ns.Labels[PolicyNameLabelKey] != "" {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
			}
		},
		// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
		// and if some part of its spec influenced by the policy changed
		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
			oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
			newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)

			if !okOld || !okNew {
				return
			}

			var ns v1.Namespace
			if err := r.Client.Get(ctx, types.NamespacedName{Name: oldCluster.Namespace}, &ns); err != nil {
				return
			}

			if ns.Labels[PolicyNameLabelKey] == "" {
				return
			}

			clusterSubSpecOld := clusterSubSpec{
				PriorityClass: oldCluster.Spec.PriorityClass,
				NodeSelector:  oldCluster.Spec.NodeSelector,
			}

			clusterSubSpecNew := clusterSubSpec{
				PriorityClass: newCluster.Spec.PriorityClass,
				NodeSelector:  newCluster.Spec.NodeSelector,
			}

			if !reflect.DeepEqual(clusterSubSpecOld, clusterSubSpecNew) {
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
			}
		},
		// When a Cluster is deleted -> nothing to do
		DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {},
	}
}

func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling VirtualClusterPolicy")

	var policy v1alpha1.VirtualClusterPolicy
	if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	orig := policy.DeepCopy()

	reconcilerErr := c.reconcileVirtualClusterPolicy(ctx, &policy)

	// update Status if needed
	if !reflect.DeepEqual(orig.Status, policy.Status) {
		if err := c.Client.Status().Update(ctx, &policy); err != nil {
			return reconcile.Result{}, err
		}
	}

	// if there was an error during the reconciliation, return
	if reconcilerErr != nil {
		return reconcile.Result{}, reconcilerErr
	}

	// update the VirtualClusterPolicy if needed
	// (orig is a pointer, so compare it against &policy)
	if !reflect.DeepEqual(orig, &policy) {
		if err := c.Client.Update(ctx, &policy); err != nil {
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}
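
Note the ordering in Reconcile: .status is persisted before the reconciliation error is surfaced, so partial progress is not lost when a pass fails. That step in isolation might look like this (a sketch; persistStatus is a hypothetical helper, not part of the diff):

package policy

import (
	"context"
	"reflect"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// persistStatus writes .status back whenever it drifted from the original,
// regardless of whether the surrounding reconciliation succeeded.
func persistStatus(ctx context.Context, c client.Client, orig, current *v1alpha1.VirtualClusterPolicy) error {
	if reflect.DeepEqual(orig.Status, current.Status) {
		return nil // nothing changed
	}

	return c.Status().Update(ctx, current)
}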

func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
	if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
		return err
	}

	if err := c.cleanupNamespaces(ctx); err != nil {
		return err
	}

	return nil
}

func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling matching Namespaces")

	listOpts := client.MatchingLabels{
		PolicyNameLabelKey: policy.Name,
	}

	var namespaces v1.NamespaceList
	if err := c.Client.List(ctx, &namespaces, listOpts); err != nil {
		return err
	}

	for _, ns := range namespaces.Items {
		ctx = ctrl.LoggerInto(ctx, log.WithValues("namespace", ns.Name))
		log.Info("reconciling Namespace")

		orig := ns.DeepCopy()

		if err := c.reconcileNetworkPolicy(ctx, ns.Name, policy); err != nil {
			return err
		}

		if err := c.reconcileQuota(ctx, ns.Name, policy); err != nil {
			return err
		}

		if err := c.reconcileLimit(ctx, ns.Name, policy); err != nil {
			return err
		}

		if err := c.reconcileClusters(ctx, &ns, policy); err != nil {
			return err
		}

		c.reconcileNamespacePodSecurityLabels(ctx, &ns, policy)

		if !reflect.DeepEqual(orig, &ns) {
			if err := c.Client.Update(ctx, &ns); err != nil {
				return err
			}
		}
	}

	return nil
}

func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling ResourceQuota")

	if policy.Spec.Quota == nil {
		// check if the ResourceQuota object exists and delete it
		var toDeleteResourceQuota v1.ResourceQuota

		key := types.NamespacedName{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
			Namespace: namespace,
		}

		if err := c.Client.Get(ctx, key, &toDeleteResourceQuota); err != nil {
			return client.IgnoreNotFound(err)
		}

		return c.Client.Delete(ctx, &toDeleteResourceQuota)
	}

	// create/update the ResourceQuota
	resourceQuota := &v1.ResourceQuota{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ResourceQuota",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
			Namespace: namespace,
			Labels: map[string]string{
				ManagedByLabelKey:  VirtualPolicyControllerName,
				PolicyNameLabelKey: policy.Name,
			},
		},
		Spec: *policy.Spec.Quota,
	}

	if err := ctrl.SetControllerReference(policy, resourceQuota, c.Scheme); err != nil {
		return err
	}

	err := c.Client.Create(ctx, resourceQuota)
	if apierrors.IsAlreadyExists(err) {
		return c.Client.Update(ctx, resourceQuota)
	}

	return err
}

func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling LimitRange")

	// delete the LimitRange if spec.limits isn't specified
	if policy.Spec.Limit == nil {
		var toDeleteLimitRange v1.LimitRange

		key := types.NamespacedName{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
			Namespace: namespace,
		}

		if err := c.Client.Get(ctx, key, &toDeleteLimitRange); err != nil {
			return client.IgnoreNotFound(err)
		}

		return c.Client.Delete(ctx, &toDeleteLimitRange)
	}

	limitRange := &v1.LimitRange{
		TypeMeta: metav1.TypeMeta{
			Kind:       "LimitRange",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
			Namespace: namespace,
			Labels: map[string]string{
				ManagedByLabelKey:  VirtualPolicyControllerName,
				PolicyNameLabelKey: policy.Name,
			},
		},
		Spec: *policy.Spec.Limit,
	}

	if err := ctrl.SetControllerReference(policy, limitRange, c.Scheme); err != nil {
		return err
	}

	err := c.Client.Create(ctx, limitRange)
	if apierrors.IsAlreadyExists(err) {
		return c.Client.Update(ctx, limitRange)
	}

	return err
}

func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling Clusters")

	var clusters v1alpha1.ClusterList
	if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
		return err
	}

	var clusterUpdateErrs []error

	for _, cluster := range clusters.Items {
		orig := cluster.DeepCopy()

		cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
		cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector

		// orig is a pointer, so compare it against &cluster
		if !reflect.DeepEqual(orig, &cluster) {
			// continue updating the other clusters even if an error occurred
			clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
		}
	}

	return errors.Join(clusterUpdateErrs...)
}
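
The Create-then-Update-on-IsAlreadyExists idiom above appears three times in this file (NetworkPolicy, ResourceQuota, LimitRange). A possible consolidation, sketched as a package-local helper (createOrUpdate is a hypothetical name, not part of the diff):

package policy

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdate attempts a Create and falls back to Update when the
// object already exists, mirroring the inline pattern used above.
func createOrUpdate(ctx context.Context, c client.Client, obj client.Object) error {
	err := c.Create(ctx, obj)
	if apierrors.IsAlreadyExists(err) {
		return c.Update(ctx, obj)
	}

	return err
}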

@@ -1,4 +1,4 @@
package clusterset_test
package policy_test

import (
	"context"
@@ -7,7 +7,7 @@ import (

	"github.com/go-logr/zapr"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/clusterset"
	"github.com/rancher/k3k/pkg/controller/policy"

	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
@@ -24,7 +24,7 @@ import (

func TestController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "ClusterSet Controller Suite")
	RunSpecs(t, "VirtualClusterPolicy Controller Suite")
}

var (
@@ -54,7 +54,7 @@ var _ = BeforeSuite(func() {
	ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))

	ctx, cancel = context.WithCancel(context.Background())
	err = clusterset.Add(ctx, mgr, "")
	err = policy.Add(mgr, "")
	Expect(err).NotTo(HaveOccurred())

	go func() {
585
pkg/controller/policy/policy_test.go
Normal file
@@ -0,0 +1,585 @@
package policy_test

import (
	"context"
	"fmt"
	"reflect"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"

	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {

	Context("creating a VirtualClusterPolicy", func() {

		It("should have the 'shared' allowedMode", func() {
			policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
			Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
		})

		It("should have the 'virtual' mode if specified", func() {
			policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
				AllowedMode: v1alpha1.VirtualClusterMode,
			})

			Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.VirtualClusterMode))
		})

		It("should fail for a non-existing mode", func() {
			policy := &v1alpha1.VirtualClusterPolicy{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "policy-",
				},
				Spec: v1alpha1.VirtualClusterPolicySpec{
					AllowedMode: v1alpha1.ClusterMode("non-existing"),
				},
			}

			err := k8sClient.Create(ctx, policy)
			Expect(err).To(HaveOccurred())
		})

		When("bound to a namespace", func() {

			var namespace *v1.Namespace

			BeforeEach(func() {
				namespace = &v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						GenerateName: "ns-",
					},
				}

				err := k8sClient.Create(ctx, namespace)
				Expect(err).To(Not(HaveOccurred()))
			})

			It("should create a NetworkPolicy", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
				bindPolicyToNamespace(namespace, policy)

				// look for network policies etc
				networkPolicy := &networkingv1.NetworkPolicy{}

				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}
					return k8sClient.Get(ctx, key, networkPolicy)
				}).
					WithTimeout(time.Minute).
					WithPolling(time.Second).
					Should(BeNil())

				spec := networkPolicy.Spec
				Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
				Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))

				// ingress should allow everything
				Expect(spec.Ingress).To(ConsistOf(networkingv1.NetworkPolicyIngressRule{}))

				// egress should contain some rules
				Expect(spec.Egress).To(HaveLen(1))

				// allow networking to all external IPs
				ipBlockRule := networkingv1.NetworkPolicyPeer{
					IPBlock: &networkingv1.IPBlock{CIDR: "0.0.0.0/0"},
				}

				// allow networking in the same namespace
				namespaceRule := networkingv1.NetworkPolicyPeer{
					NamespaceSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace.Name},
					},
				}

				// allow networking to the "kube-dns" pod in the "kube-system" namespace
				kubeDNSRule := networkingv1.NetworkPolicyPeer{
					PodSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"k8s-app": "kube-dns"},
					},
					NamespaceSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"},
					},
				}

				Expect(spec.Egress[0].To).To(ContainElements(
					ipBlockRule, namespaceRule, kubeDNSRule,
				))
			})

			It("should recreate the NetworkPolicy if deleted", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
				bindPolicyToNamespace(namespace, policy)

				// look for the network policy
				networkPolicy := &networkingv1.NetworkPolicy{}

				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}
					return k8sClient.Get(context.Background(), key, networkPolicy)
				}).
					WithTimeout(time.Minute).
					WithPolling(time.Second).
					Should(BeNil())

				err := k8sClient.Delete(ctx, networkPolicy)
				Expect(err).To(Not(HaveOccurred()))

				key := types.NamespacedName{
					Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
					Namespace: namespace.Name,
				}
				err = k8sClient.Get(ctx, key, networkPolicy)
				Expect(apierrors.IsNotFound(err)).Should(BeTrue())

				// wait a bit for the network policy to be recreated
				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}
					return k8sClient.Get(ctx, key, networkPolicy)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeNil())
			})

			It("should add and update the proper pod-security labels to the namespace", func() {
				var (
					privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
					baseline   = v1alpha1.BaselinePodSecurityAdmissionLevel
					restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
				)

				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					PodSecurityAdmissionLevel: &privileged,
				})

				bindPolicyToNamespace(namespace, policy)

				var ns v1.Namespace

				// Check privileged

				// wait a bit for the namespace to be updated
				Eventually(func() string {
					err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					return ns.Labels["pod-security.kubernetes.io/enforce"]
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(Equal("privileged"))

				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))

				// Check baseline

				policy.Spec.PodSecurityAdmissionLevel = &baseline
				err := k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit for the namespace to be updated
				Eventually(func() string {
					err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					return ns.Labels["pod-security.kubernetes.io/enforce"]
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(Equal("baseline"))

				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "baseline"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "baseline"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))

				// Check restricted

				policy.Spec.PodSecurityAdmissionLevel = &restricted
				err = k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit for the namespace to be updated
				Eventually(func() string {
					err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					return ns.Labels["pod-security.kubernetes.io/enforce"]
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(Equal("restricted"))

				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "restricted"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))

				// check cleanup

				policy.Spec.PodSecurityAdmissionLevel = nil
				err = k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit for the namespace to be updated
				Eventually(func() bool {
					err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					_, found := ns.Labels["pod-security.kubernetes.io/enforce"]
					return found
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeFalse())

				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce")))
				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce-version")))
				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
				Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))
			})

			It("should restore the labels if Namespace is updated", func() {
				privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel

				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					PodSecurityAdmissionLevel: &privileged,
				})

				bindPolicyToNamespace(namespace, policy)

				var ns v1.Namespace

				// wait a bit for the namespace to be updated
				Eventually(func() bool {
					err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
					return enforceValue == "privileged"
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())

				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))

				ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
				err := k8sClient.Update(ctx, &ns)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit for the namespace to be restored
				Eventually(func() bool {
					err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
					Expect(err).To(Not(HaveOccurred()))
					enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
					return enforceValue == "privileged"
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())

				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
				Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
			})

			It("should update Cluster's PriorityClass", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					DefaultPriorityClass: "foobar",
				})

				bindPolicyToNamespace(namespace, policy)

				cluster := &v1alpha1.Cluster{
					ObjectMeta: metav1.ObjectMeta{
						GenerateName: "cluster-",
						Namespace:    namespace.Name,
					},
					Spec: v1alpha1.ClusterSpec{
						Mode:    v1alpha1.SharedClusterMode,
						Servers: ptr.To[int32](1),
						Agents:  ptr.To[int32](0),
					},
				}

				err := k8sClient.Create(ctx, cluster)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit
				Eventually(func() bool {
					key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
					err = k8sClient.Get(ctx, key, cluster)
					Expect(err).To(Not(HaveOccurred()))
					return cluster.Spec.PriorityClass == policy.Spec.DefaultPriorityClass
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())
			})

			It("should update Cluster's NodeSelector", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					DefaultNodeSelector: map[string]string{"label-1": "value-1"},
				})
				bindPolicyToNamespace(namespace, policy)

				err := k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))

				cluster := &v1alpha1.Cluster{
					ObjectMeta: metav1.ObjectMeta{
						GenerateName: "cluster-",
						Namespace:    namespace.Name,
					},
					Spec: v1alpha1.ClusterSpec{
						Mode:    v1alpha1.SharedClusterMode,
						Servers: ptr.To[int32](1),
						Agents:  ptr.To[int32](0),
					},
				}

				err = k8sClient.Create(ctx, cluster)
				Expect(err).To(Not(HaveOccurred()))

				// wait a bit
				Eventually(func() bool {
					key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
					err = k8sClient.Get(ctx, key, cluster)
					Expect(err).To(Not(HaveOccurred()))
					return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())
			})

			It("should update the nodeSelector if changed", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					DefaultNodeSelector: map[string]string{"label-1": "value-1"},
				})
				bindPolicyToNamespace(namespace, policy)

				cluster := &v1alpha1.Cluster{
					ObjectMeta: metav1.ObjectMeta{
						GenerateName: "cluster-",
						Namespace:    namespace.Name,
					},
					Spec: v1alpha1.ClusterSpec{
						Mode:         v1alpha1.SharedClusterMode,
						Servers:      ptr.To[int32](1),
						Agents:       ptr.To[int32](0),
						NodeSelector: map[string]string{"label-1": "value-1"},
					},
				}

				err := k8sClient.Create(ctx, cluster)
				Expect(err).To(Not(HaveOccurred()))

				Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))

				// update the VirtualClusterPolicy
				policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
				err = k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))
				Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

				// wait a bit
				Eventually(func() bool {
					key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
					err = k8sClient.Get(ctx, key, cluster)
					Expect(err).To(Not(HaveOccurred()))
					return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())

				// Update the Cluster
				cluster.Spec.NodeSelector["label-3"] = "value-3"
				err = k8sClient.Update(ctx, cluster)
				Expect(err).To(Not(HaveOccurred()))
				Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

				// wait a bit and check it's restored
				Eventually(func() bool {
					var updatedCluster v1alpha1.Cluster

					key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
					err = k8sClient.Get(ctx, key, &updatedCluster)
					Expect(err).To(Not(HaveOccurred()))
					return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())
			})

			It("should create a ResourceQuota if Quota is enabled", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					Quota: &v1.ResourceQuotaSpec{
						Hard: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("800m"),
							v1.ResourceMemory: resource.MustParse("1Gi"),
						},
					},
				})

				bindPolicyToNamespace(namespace, policy)

				var resourceQuota v1.ResourceQuota
				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}

					return k8sClient.Get(ctx, key, &resourceQuota)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeNil())
				Expect(resourceQuota.Spec.Hard.Cpu().String()).To(BeEquivalentTo("800m"))
				Expect(resourceQuota.Spec.Hard.Memory().String()).To(BeEquivalentTo("1Gi"))
			})

			It("should delete the ResourceQuota if Quota is deleted", func() {
				policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					Quota: &v1.ResourceQuotaSpec{
						Hard: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("800m"),
							v1.ResourceMemory: resource.MustParse("1Gi"),
						},
					},
				})

				bindPolicyToNamespace(namespace, policy)

				var resourceQuota v1.ResourceQuota

				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}
					return k8sClient.Get(ctx, key, &resourceQuota)
				}).
					WithTimeout(time.Minute).
					WithPolling(time.Second).
					Should(BeNil())

				policy.Spec.Quota = nil
				err := k8sClient.Update(ctx, policy)
				Expect(err).To(Not(HaveOccurred()))

				// wait for a bit for the resourceQuota to be deleted
				Eventually(func() bool {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
						Namespace: namespace.Name,
					}
					err := k8sClient.Get(ctx, key, &resourceQuota)
					return apierrors.IsNotFound(err)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())
			})

			It("should delete the ResourceQuota if unbound", func() {
				clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
					Quota: &v1.ResourceQuotaSpec{
						Hard: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("800m"),
							v1.ResourceMemory: resource.MustParse("1Gi"),
						},
					},
				})

				bindPolicyToNamespace(namespace, clusterPolicy)

				var resourceQuota v1.ResourceQuota

				Eventually(func() error {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
						Namespace: namespace.Name,
					}
					return k8sClient.Get(ctx, key, &resourceQuota)
				}).
					WithTimeout(time.Minute).
					WithPolling(time.Second).
					Should(BeNil())

				fmt.Printf("%+v\n", resourceQuota)

				delete(namespace.Labels, policy.PolicyNameLabelKey)
				err := k8sClient.Update(ctx, namespace)
				Expect(err).To(Not(HaveOccurred()))

				// wait for a bit for the resourceQuota to be deleted
				Eventually(func() bool {
					key := types.NamespacedName{
						Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
						Namespace: namespace.Name,
					}
					err := k8sClient.Get(ctx, key, &resourceQuota)
					return apierrors.IsNotFound(err)
				}).
					WithTimeout(time.Second * 10).
					WithPolling(time.Second).
					Should(BeTrue())
			})
		})
	})
})

func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
	GinkgoHelper()

	policy := &v1alpha1.VirtualClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "policy-",
		},
		Spec: spec,
	}

	err := k8sClient.Create(ctx, policy)
	Expect(err).To(Not(HaveOccurred()))

	return policy
}

func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
	GinkgoHelper()

	if len(namespace.Labels) == 0 {
		namespace.Labels = map[string]string{}
	}

	namespace.Labels[policy.PolicyNameLabelKey] = pol.Name

	err := k8sClient.Update(ctx, namespace)
	Expect(err).To(Not(HaveOccurred()))
}
13
scripts/generate
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash

set -eou pipefail

CONTROLLER_TOOLS_VERSION=v0.14.0

# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for structs that use floats
go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSION} \
    crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
    object paths=./pkg/apis/... \
    output:crd:dir=./charts/k3k/crds
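
For `go generate ./...` to pick this script up, a directive presumably points at it from one of the Go packages; something along these lines (the file and relative path are hypothetical, not shown in this diff):

// doc.go under pkg/apis (assumed location)
//go:generate ../../scripts/generate
package apis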

@@ -165,7 +165,7 @@ var _ = When("a dynamic cluster is installed", func() {
			Expect(len(serverPods.Items)).To(Equal(1))
			return serverPods.Items[0].DeletionTimestamp
		}).
			WithTimeout(30 * time.Second).
			WithTimeout(60 * time.Second).
			WithPolling(time.Second * 5).
			Should(BeNil())