Compare commits

..

25 Commits

Author SHA1 Message Date
Enrico Candino
8ffdc9bafd renaming webhook (#506) 2025-10-13 17:25:17 +02:00
Enrico Candino
594c2571c3 promoted v1alpha1 resources to v1beta1 (#505) 2025-10-13 17:24:56 +02:00
Hussein Galal
12971f55a6 Add k8s version upgrade test (#503)
* Add k8s version upgrade test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-13 17:14:25 +03:00
Enrico Candino
99f750525f Fix extraEnv and other Helm values (#500)
* fix for extraEnv

* moved env var to flags

* changed resources as object

* renamed replicaCount to replicas

* cleanup spaces

* moved some values and spacing

* renamed some flags
2025-10-13 12:50:07 +02:00
Hussein Galal
a0fd472841 Use K3S host cluster for E2E tests (#492)
* Add kubeconfig to e2e_tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add E2E_KUBECONFIG env variable

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix yaml permissions for kubeconfig

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix image name and use ttl.sh

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add uuidgen result to a file

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add hostIP

* Add k3s version to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove virtual mode tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix failed test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add KUBECONFIG env variable to the make install

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add k3kcli to github_path

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Use docker installation for testing the cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-08 15:39:35 +03:00
Enrico Candino
7387fc1b23 Fix Service reconciliation error loop (#497)
* fix service reconciliation error by adding checks for virtual service annotations

* renamed var
2025-10-08 14:03:50 +02:00
Enrico Candino
9f265c73d9 Fix for HA server deletion (#493)
* wip

* wip

* wip

* removed todo
2025-10-08 13:23:15 +02:00
Enrico Candino
00ef6d582c Add log-format, and cleanup (#494)
* using logr.Logger

* testing levels

* adding log format

* fix lint

* removed tests

* final cleanup
2025-10-08 13:19:57 +02:00
Enrico Candino
5c95ca3dfa Fix for pod eviction in host cluster (#484)
* update statefulset controller

* fix for single pod

* adding pod controller

* added test

* removed comment

* merged service controller

* revert statefulset

* added test

* added common owner filter
2025-10-03 16:22:54 +02:00
jpgouin
6523b8339b change the default storage request size to 2Gi (#490)
* change the default storage request size to 2Gi
2025-10-03 09:04:13 +02:00
Hussein Galal
80037e815f Adding upgrade path tests (#481)
* Adding upgrade path tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Remove update label

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-02 14:53:08 +03:00
Enrico Candino
7585611792 Rename PodController to StatefulSetController (#482)
* renamed pod.go

* update statefulset controller

* fix for single pod

* added test, revert finalizer

* wip ha deletion

* revert logic

* remove focus
2025-10-01 17:06:24 +02:00
Hussein Galal
0bd681ab60 Lb service status sync (#451)
* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-01 13:25:31 +03:00
Hussein Galal
4fe36b3d0c Bump Chart to v0.3.5 (#485)
* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-30 15:26:59 +03:00
Enrico Candino
01589bb359 splitting tests (#461) 2025-09-23 12:07:49 +02:00
Hussein Galal
30217df268 Bump chart to v0.3.5-rc1 (#467)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-17 12:14:44 +03:00
Enrico Candino
04198652d5 check for single expose mode (#466) 2025-09-17 10:39:55 +02:00
Hussein Galal
72eb819216 Add imagepullsecrets to controller, server, and agents (#455)
* Add imagepullsecrets to controller, server, and agents

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add agent section to helm chart values

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix charts values

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixing chart and refactoring cluster config

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* default lists to the values of imagepullsecrets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix version image function and add unit tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify arguments and remove registry from the code

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-17 11:29:01 +03:00
Alex Bissessur
4d4003f6f9 fix broken k3kcli docs path (#463)
Signed-off-by: alex <alexbissessur@gmail.com>
2025-09-16 16:41:19 +02:00
Hussein Galal
aca01127f8 Fix PVC sync and sync defaults (#458)
* Fix PVC sync and sync defaults

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix PVC sync and sync defaults

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes to pvc sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* increase the timeout on the e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* configure the syncConfig correctly in vcp

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix policy unit test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* revert timeout of the test to 20 second

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-16 13:01:12 +03:00
Enrico Candino
1550c6b45a Add k3k controller coverage data (#452)
* added k3k controller coverage data

* cleanup
2025-09-03 11:37:56 +02:00
Hussein Galal
caf785f23b Add resources sync configuration (#431)
* Add resources sync configuration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor cluster sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify the syncerContext

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify the syncerContext

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* drop the ClusterClient struct

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix updates to syncer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor secrets/configmaps sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor secrets/configmaps sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add imagepullsecret translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add exception for deleted resources

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* linting fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove the option to disable imagepullsecret translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-01 14:34:29 +03:00
Hussein Galal
b3f7a8ab7f Fix subpath field (#441)
* Fix pod fieldpath annotation translation (#434)

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix overrideEnvVars

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove extra comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix unit tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix merge env vars

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-01 11:10:31 +03:00
Enrico Candino
bd2494a0a9 Bump Charts to 0.3.4 (#446) 2025-08-28 10:57:23 +02:00
Enrico Candino
237a3cb280 Bump Charts to 0.3.4-rc3 (#445) 2025-08-25 19:02:30 +02:00
99 changed files with 5532 additions and 2201 deletions

View File

@@ -106,7 +106,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
@@ -259,7 +259,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -79,11 +79,31 @@ jobs:
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Build and package
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
@@ -92,9 +112,23 @@ jobs:
run: k3kcli -v
- name: Run e2e tests
run: make test-e2e
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make test-e2e
- name: Upload coverage reports to Codecov
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -133,12 +167,13 @@ jobs:
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Build and package
run: |
@@ -152,6 +187,9 @@ jobs:
run: k3kcli -v
- name: Run cli tests
env:
K3K_DOCKER_INSTALL: "true"
K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
run: make test-cli
- name: Convert coverage data

View File

@@ -83,7 +83,7 @@ generate: ## Generate the CRDs specs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1alpha1 \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
@@ -105,10 +105,10 @@ validate: generate docs fmt ## Validate the project checking for any dependency
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set image.repository=$(REPO)/k3k \
--set image.tag=$(VERSION) \
--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
--set sharedAgent.image.tag=$(VERSION) \
--set controller.image.repository=$(REPO)/k3k \
--set controller.image.tag=$(VERSION) \
--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
--set agent.shared.image.tag=$(VERSION) \
k3k ./charts/k3k/
.PHONY: help
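
For reference, the updated `--set` flags map onto the restructured chart values roughly as sketched below. The `ttl.sh/<uuid>` repository and `1h` tag mirror the REPO/VERSION used by the CI workflow above and are placeholders, not fixed names:

```yaml
controller:
  image:
    repository: ttl.sh/<uuid>/k3k            # was image.repository
    tag: 1h                                   # was image.tag
agent:
  shared:
    image:
      repository: ttl.sh/<uuid>/k3k-kubelet   # was sharedAgent.image.repository
      tag: 1h                                 # was sharedAgent.image.tag
```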

View File

@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.3/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.3/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.3.3
k3kcli version v0.3.5
```
@@ -135,7 +135,7 @@ You can also directly create a Cluster resource in some namespace, to create a K
```bash
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.4-rc2
appVersion: v0.3.4-rc2
version: 0.3.5
appVersion: v0.3.5

View File

@@ -24,7 +24,7 @@ spec:
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -365,6 +365,11 @@ spec:
type: integer
type: object
type: object
x-kubernetes-validations:
- message: ingress, loadbalancer and nodePort are mutually exclusive;
only one can be set
rule: '[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x,
x).size() <= 1'
mirrorHostNodes:
description: |-
MirrorHostNodes controls whether node objects from the host cluster
@@ -405,7 +410,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 1G
default: 2G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
@@ -574,6 +579,108 @@ spec:
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configmaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
type: object
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
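
Putting the new v1beta1 schema pieces together, a minimal Cluster manifest might look like the sketch below. Field nesting for `persistence` and `expose` is inferred from the CLI and docs diffs in this compare, and the label selector and names are made up:

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  mode: shared
  servers: 1
  persistence:
    type: dynamic
    storageRequestSize: 2G    # new default per the CRD change above
  expose:
    nodePort: {}              # ingress, loadbalancer and nodePort are now mutually exclusive
  sync:
    configmaps:
      enabled: true
      selector:
        app: demo             # hypothetical label; an empty selector syncs all ConfigMaps
    ingresses:
      enabled: false
```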

View File

@@ -20,7 +20,7 @@ spec:
- jsonPath: .spec.allowedMode
name: Mode
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -225,6 +225,108 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configmaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
type: object
type: object
status:
description: Status reflects the observed state of the VirtualClusterPolicy.
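
A VirtualClusterPolicy can now carry the same `sync` block; a minimal hypothetical example (the policy name is invented):

```yaml
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: shared-sync-policy
spec:
  allowedMode: shared
  sync:
    persistentVolumeClaims:
      enabled: true
    priorityClasses:
      enabled: false
```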

View File

@@ -60,3 +60,54 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Print the image pull secrets in the expected format (an array of objects with one possible field, "name").
*/}}
{{- define "image.pullSecrets" }}
{{- $imagePullSecrets := list }}
{{- range . }}
{{- if kindIs "string" . }}
{{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }}
{{- else }}
{{- $imagePullSecrets = append $imagePullSecrets . }}
{{- end }}
{{- end }}
{{- toYaml $imagePullSecrets }}
{{- end }}
{{- define "controller.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "server.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "agent.virtual.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "agent.shared.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
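
These helpers accept pull secrets either as plain strings or as `{name: ...}` objects, and a global registry takes precedence over the per-component one. A hypothetical values override (registry and secret names invented):

```yaml
global:
  imageRegistry: registry.example.com   # prepended as "registry.example.com/" by the *.registry helpers
  imagePullSecrets:
    - org-wide-secret                   # string form, normalized to {name: org-wide-secret}
controller:
  imagePullSecrets:
    - name: controller-secret           # object form, passed through as-is
```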

View File

@@ -6,7 +6,7 @@ metadata:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.image.replicaCount }}
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
{{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -15,30 +15,40 @@ spec:
labels:
{{- include "k3k.selectorLabels" . | nindent 8 }}
spec:
imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }}
containers:
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
name: {{ .Chart.Name }}
{{- with .Values.controller.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
args:
- k3k
- --cluster-cidr={{ .Values.host.clusterCIDR }}
- --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }}
- --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }}
- --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}
- --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }}
- --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
- --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
- --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
- --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --agent-image-pull-secret
- {{ .name }}
{{- end }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --server-image-pull-secret
- {{ .name }}
{{- end }}
env:
- name: CLUSTER_CIDR
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
- name: K3S_IMAGE
value: {{ .Values.k3sServer.image.repository }}
- name: K3S_IMAGE_PULL_POLICY
value: {{ .Values.k3sServer.image.pullPolicy }}
- name: KUBELET_PORT_RANGE
value: {{ .Values.sharedAgent.kubeletPortRange }}
- name: WEBHOOK_PORT_RANGE
value: {{ .Values.sharedAgent.webhookPortRange }}
- name: CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- with .Values.controller.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
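
With the environment-variable configuration replaced by flags, the rendered controller args end up looking roughly like this (illustrative values, not an authoritative rendering of the chart defaults):

```yaml
args:
  - k3k
  - --cluster-cidr=10.42.0.0/16
  - --k3s-server-image=rancher/k3s
  - --k3s-server-image-pull-policy=IfNotPresent
  - --agent-shared-image=rancher/k3k-kubelet:v0.3.5
  - --agent-shared-image-pull-policy=IfNotPresent
  - --agent-virtual-image=rancher/k3s
  - --agent-virtual-image-pull-policy=IfNotPresent
  - --kubelet-port-range=50000-51000
  - --webhook-port-range=51001-52000
```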

View File

@@ -1,31 +1,11 @@
replicaCount: 1
image:
repository: rancher/k3k
tag: ""
pullPolicy: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
# extraEnv:
# - name: MY_CUSTOM_VAR
# value: "my_custom_value"
# - name: ANOTHER_VAR
# valueFrom:
# secretKeyRef:
# name: my-secret
# key: my-key
extraEnv: []
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
global:
# -- Global override for container image registry
imageRegistry: ""
# -- Global override for container image registry pull secrets
imagePullSecrets: []
serviceAccount:
# Specifies whether a service account should be created
@@ -34,18 +14,72 @@ serviceAccount:
# If not set and create is true, a name is generated using the fullname template
name: ""
# configuration related to the shared agent mode in k3k
sharedAgent:
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
controller:
replicas: 1
image:
repository: "rancher/k3k-kubelet"
registry: ""
repository: rancher/k3k
tag: ""
pullPolicy: ""
# image registry configuration related to the k3s server
k3sServer:
imagePullSecrets: []
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
# extraEnv:
# - name: MY_CUSTOM_VAR
# value: "my_custom_value"
# - name: ANOTHER_VAR
# valueFrom:
# secretKeyRef:
# name: my-secret
# key: my-key
extraEnv: []
# resources allows you to set resources limits and requests for CPU and Memory
# resources:
# limits:
# cpu: "200m"
# memory: "200Mi"
# requests:
# cpu: "100m"
# memory: "100Mi"
resources: {}
# configuration related to k3s server component in k3k
server:
imagePullSecrets: []
image:
registry:
repository: "rancher/k3s"
pullPolicy: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -78,7 +78,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
return errors.New("invalid cluster name")
}
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
if config.mode == string(v1beta1.SharedClusterMode) && config.agents != 0 {
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
}
@@ -114,8 +114,8 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
cluster.Spec.Expose = &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
@@ -169,17 +169,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
}
}
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
cluster := &v1alpha1.Cluster{
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Servers: ptr.To(int32(config.servers)),
Agents: ptr.To(int32(config.agents)),
ClusterCIDR: config.clusterCIDR,
@@ -189,9 +189,9 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
ServerEnvs: env(config.serverEnvs),
AgentEnvs: env(config.agentEnvs),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
Mode: v1beta1.ClusterMode(config.mode),
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
@@ -210,25 +210,25 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
}
if config.customCertsPath != "" {
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
cluster.Spec.CustomCAs = v1beta1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ClientCA: v1alpha1.CredentialSource{
Sources: v1beta1.CredentialSources{
ClientCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
},
ServerCA: v1alpha1.CredentialSource{
ServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
},
ETCDServerCA: v1alpha1.CredentialSource{
ETCDServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
},
ETCDPeerCA: v1alpha1.CredentialSource{
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
},
RequestHeaderCA: v1alpha1.CredentialSource{
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
},
ServiceAccountToken: v1alpha1.CredentialSource{
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
},
},
@@ -256,7 +256,7 @@ func env(envSlice []string) []v1.EnvVar {
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster) error {
interval := 5 * time.Second
timeout := 2 * time.Minute
@@ -267,12 +267,12 @@ func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alp
}
// If resource ready -> stop polling
if cluster.Status.Phase == v1alpha1.ClusterReady {
if cluster.Status.Phase == v1beta1.ClusterReady {
return true, nil
}
// If resource failed -> stop polling with an error
if cluster.Status.Phase == v1alpha1.ClusterFailed {
if cluster.Status.Phase == v1beta1.ClusterFailed {
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
}

View File

@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
@@ -16,7 +16,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
@@ -36,8 +36,8 @@ func validateCreateConfig(cfg *CreateConfig) error {
}
if cfg.persistenceType != "" {
switch v1alpha1.PersistenceMode(cfg.persistenceType) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
switch v1beta1.PersistenceMode(cfg.persistenceType) {
case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
@@ -50,7 +50,7 @@ func validateCreateConfig(cfg *CreateConfig) error {
if cfg.mode != "" {
switch cfg.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)

View File

@@ -14,7 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -50,7 +50,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
cluster := v1alpha1.Cluster{
cluster := v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
@@ -86,7 +86,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
}
}
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error {
var secret v1.Secret
key := types.NamespacedName{

View File

@@ -10,7 +10,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
@@ -32,7 +32,7 @@ func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
return err
}

View File

@@ -18,7 +18,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -83,7 +83,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
Namespace: appCtx.Namespace(cfg.name),
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
@@ -128,7 +128,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
}
}
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}

View File

@@ -13,7 +13,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
)
@@ -30,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
@@ -51,7 +51,7 @@ func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateCo
client := appCtx.Client
policyName := args[0]
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)
return err
}
@@ -81,18 +81,18 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: mode,
},
}

View File

@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []stri
client := appCtx.Client
name := args[0]
policy := &v1alpha1.VirtualClusterPolicy{}
policy := &v1beta1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {

View File

@@ -9,7 +9,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) erro
ctx := context.Background()
client := appCtx.Client
var policies v1alpha1.VirtualClusterPolicyList
var policies v1beta1.VirtualClusterPolicyList
if err := client.List(ctx, &policies); err != nil {
return err
}

View File

@@ -16,7 +16,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
)
@@ -51,7 +51,7 @@ func NewRootCmd() *cobra.Command {
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})

View File

@@ -25,7 +25,7 @@ func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []a
}
for _, version := range crd.Spec.Versions {
if version.Name == "v1alpha1" {
if version.Name == "v1beta1" {
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
break
}

View File

@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us
The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.
@@ -22,7 +22,7 @@ This example creates a "shared" mode K3k cluster with:
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: my-virtual-cluster

View File

@@ -1,10 +1,10 @@
# API Reference
## Packages
- [k3k.io/v1alpha1](#k3kiov1alpha1)
- [k3k.io/v1beta1](#k3kiov1beta1)
## k3k.io/v1alpha1
## k3k.io/v1beta1
### Resource Types
@@ -47,7 +47,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
@@ -65,7 +65,7 @@ ClusterList is a list of Cluster resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `ClusterList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[Cluster](#cluster) array_ | | | |
@@ -134,10 +134,28 @@ _Appears in:_
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
#### ConfigMapSyncConfig
ConfigMapSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### CredentialSource
@@ -229,6 +247,23 @@ _Appears in:_
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
#### IngressSyncConfig
IngressSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### LoadBalancerConfig
@@ -278,7 +313,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
#### PersistenceMode
@@ -294,6 +329,23 @@ _Appears in:_
#### PersistentVolumeClaimSyncConfig
PersistentVolumeClaimSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### PodSecurityAdmissionLevel
_Underlying type:_ _string_
@@ -308,6 +360,79 @@ _Appears in:_
#### PriorityClassSyncConfig
PriorityClassSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SecretSyncConfig
SecretSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### ServiceSyncConfig
ServiceSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SyncConfig
SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
| `configmaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
#### VirtualClusterPolicy
@@ -322,7 +447,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
@@ -340,7 +465,7 @@ VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |
@@ -366,6 +491,7 @@ _Appears in:_
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
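
Per the defaults in the SyncConfig table, leaving `sync: {}` empty is roughly equivalent to the following sketch (an empty selector means all resources of that type are synced):

```yaml
sync:
  services: { enabled: true }
  configmaps: { enabled: true }
  secrets: { enabled: true }
  persistentVolumeClaims: { enabled: true }
  ingresses: { enabled: false }
  priorityClasses: { enabled: false }
```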

View File

@@ -130,7 +130,7 @@ Create then the virtual cluster exposing through NodePort one of the ports that
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -32,18 +32,27 @@ Load these images into your internal (air-gapped) registry.
Update the `values.yaml` file in the K3k Helm chart with air gap settings:
```yaml
image:
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
sharedAgent:
controller:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
k3sServer:
agent:
imagePullSecrets: []
virtual:
image:
repository: rancher/k3s
pullPolicy: "" # Optional
shared:
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
server:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3s
pullPolicy: "" # Optional

View File

@@ -17,7 +17,7 @@ This guide walks through the various ways to create and manage virtual clusters
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ingress
@@ -46,7 +46,7 @@ This will create a virtual cluster in `shared` mode and expose it via an ingress
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-persistent
@@ -80,7 +80,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ha
@@ -105,7 +105,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-virtual
@@ -136,7 +136,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
@@ -162,7 +162,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
@@ -189,7 +189,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-resourced
@@ -216,7 +216,7 @@ This configures the CPU and memory limit for the virtual cluster.
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-node-placed
@@ -259,7 +259,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-http-proxy

View File

@@ -37,7 +37,7 @@ If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g
```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: my-default-policy
@@ -56,7 +56,7 @@ You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster`
**Example:** Allow only "shared" mode clusters.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: shared-only-policy
@@ -74,7 +74,7 @@ You can define resource consumption limits for bound Namespaces by specifying a
**Example:** Set CPU, memory, and pod limits.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: quota-policy
@@ -93,7 +93,7 @@ You can define default resource requests/limits and min/max constraints for cont
**Example:** Define default CPU requests/limits and min/max CPU.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: limit-policy
@@ -118,7 +118,7 @@ By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network
**Example:** Disable the default NetworkPolicy.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: no-default-netpol-policy
@@ -133,7 +133,7 @@ You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admiss
**Example:** Enforce the "baseline" PSS level.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: baseline-psa-policy

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: policy-example

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: example1

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: single-server

21
go.mod
View File

@@ -11,6 +11,7 @@ replace (
)
require (
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
@@ -42,16 +43,6 @@ require (
sigs.k8s.io/controller-runtime v0.19.4
)
require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
@@ -64,6 +55,7 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -98,13 +90,13 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -161,6 +153,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -171,13 +164,17 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -218,7 +215,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect

View File

@@ -1,198 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
const ConfigMapSyncerName = "configmap-syncer"
type ConfigMapSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// HostClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
// representation
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
func (c *ConfigMapSyncer) Name() string {
return ConfigMapSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !c.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := c.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.ConfigMap
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
err = c.HostClient.Create(ctx, translated)
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels made on the host cluster by other applications
// but we do need to make sure the labels that the kubelet uses to track host cluster values
// are being tracked appropriately
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = c.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if c.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Insert(objKey)
c.mutex.Unlock()
_, err := c.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, no need to writelock/add it
if !c.isWatching(objKey) {
return nil
}
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return c.removeHostConfigMap(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove configmap: %w", err)
}
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Delete(objKey)
c.mutex.Unlock()
return nil
}
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
var vConfigMap corev1.ConfigMap
key := types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := c.TranslateFunc(&vConfigMap)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return c.HostClient.Delete(ctx, translated)
}

View File

@@ -1,135 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
k3klog "github.com/rancher/k3k/pkg/log"
)
type ControllerHandler struct {
sync.RWMutex
// Mgr is the manager used to run new controllers - from the virtual cluster
Mgr manager.Manager
// Scheme is the scheme used to run new controllers - from the virtual cluster
Scheme runtime.Scheme
// HostClient is the client used to communicate with the host cluster
HostClient client.Client
// VirtualClient is the client used to communicate with the virtual cluster
VirtualClient client.Client
// Translator is the translator that will be used to adjust objects before they
// are made on the host cluster
Translator translate.ToHostTranslator
// Logger is the logger that the controller will use to log errors
Logger *k3klog.Logger
// controllers are the controllers which are currently running
controllers map[schema.GroupVersionKind]updateableReconciler
}
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
// be altered through the Add and Remove methods
type updateableReconciler interface {
reconcile.Reconciler
Name() string
AddResource(ctx context.Context, namespace string, name string) error
RemoveResource(ctx context.Context, namespace string, name string) error
}
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
c.RLock()
controllers := c.controllers
if controllers != nil {
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
c.RUnlock()
return err
}
}
// we need to manually lock/unlock since we intend to write-lock when adding a new controller
c.RUnlock()
var r updateableReconciler
switch obj.(type) {
case *v1.Secret:
r = &SecretSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
// note that this doesn't do any type safety - fix this
// when generics work
c.Translator.TranslateTo(s)
// Remove service-account-token types when synced to the host
if s.Type == v1.SecretTypeServiceAccountToken {
s.Type = v1.SecretTypeOpaque
}
return s, nil
},
Logger: c.Logger,
}
case *v1.ConfigMap:
r = &ConfigMapSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
c.Translator.TranslateTo(s)
return s, nil
},
Logger: c.Logger,
}
default:
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
// logic could be used for other types.
return fmt.Errorf("unrecognized type: %T", obj)
}
err := ctrl.NewControllerManagedBy(c.Mgr).
Named(r.Name()).
For(&v1.ConfigMap{}).
Complete(r)
if err != nil {
return fmt.Errorf("unable to start configmap controller: %w", err)
}
c.Lock()
if c.controllers == nil {
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
}
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
c.Unlock()
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
}
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
// since we aren't adding a new controller, we don't need to lock
c.RLock()
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()
if !ok {
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
}
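Worth noting in this removed handler: the registration after the switch always watched ConfigMaps (`For(&v1.ConfigMap{})`) and reported "configmap controller" errors even when the Secret syncer had been selected, so Secret reconciles were only ever triggered by ConfigMap events. The per-resource controllers replacing it avoid the problem entirely; in the old style, a corrected registration might have looked like this sketch (not code from this changeset):

```go
// choose the watched type to match the syncer selected in the switch above;
// watching ConfigMaps for a Secret syncer meant Secret events never drove it
var watched client.Object

switch obj.(type) {
case *v1.Secret:
	watched = &v1.Secret{}
case *v1.ConfigMap:
	watched = &v1.ConfigMap{}
}

if err := ctrl.NewControllerManagedBy(c.Mgr).
	Named(r.Name()).
	For(watched).
	Complete(r); err != nil {
	return fmt.Errorf("unable to start %s controller: %w", r.Name(), err)
}
```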

View File

@@ -1,120 +0,0 @@
package controller
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
const (
pvcController = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
)
type PVCReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PVCReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(pvcController).
For(&v1.PersistentVolumeClaim{}).
Complete(&reconciler)
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC if it exists
if err := r.hostClient.Delete(ctx, syncedPVC); !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced PVC
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create the pvc on host
log.Info("creating the persistent volume for the first time on the host cluster")
// note that we don't need to update the PVC on the host cluster; we only sync it so that it can be
// handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translator.TranslateTo(hostPVC)
return hostPVC
}

View File

@@ -1,197 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
const SecretSyncerName = "secret-syncer"
type SecretSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// HostClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
// representation
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
func (s *SecretSyncer) Name() string {
return SecretSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !s.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := s.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.Secret
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
err = s.HostClient.Create(ctx, translated)
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels made on the host cluster by other applications
// but we do need to make sure the labels that the kubelet uses to track host cluster values
// are being tracked appropriately
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = s.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if s.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Insert(objKey)
s.mutex.Unlock()
_, err := s.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, no need to writelock/add it
if !s.isWatching(objKey) {
return nil
}
// retry removal of the host secret before dropping the key from the watch set
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return s.removeHostSecret(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove secret: %w", err)
}
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Delete(objKey)
s.mutex.Unlock()
return nil
}
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
var vSecret corev1.Secret
err := s.VirtualClient.Get(ctx, types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}, &vSecret)
if err != nil {
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := s.TranslateFunc(&vSecret)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return s.HostClient.Delete(ctx, translated)
}

View File

@@ -0,0 +1,150 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
configMapControllerName = "configmap-syncer"
configMapFinalizerName = "configmap.k3k.io/finalizer"
)
type ConfigMapSyncer struct {
// SyncerContext contains all client information for host and virtual cluster
*SyncerContext
}
func (c *ConfigMapSyncer) Name() string {
return configMapControllerName
}
// AddConfigMapSyncer adds configmap syncer controller to the manager of the virtual cluster
func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := ConfigMapSyncer{
SyncerContext: &SyncerContext{
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, configMapControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&corev1.ConfigMap{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for configMap Sync Config
syncConfig := cluster.Spec.Sync.ConfigMaps
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1beta1.Cluster
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
var virtualConfigMap corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtualConfigMap); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
// handle deletion
if !virtualConfigMap.DeletionTimestamp.IsZero() {
// delete the synced configMap if it exists
if err := c.HostClient.Delete(ctx, syncedConfigMap); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced configMap
if controllerutil.RemoveFinalizer(&virtualConfigMap, configMapFinalizerName) {
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtualConfigMap, configMapFinalizerName) {
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
return reconcile.Result{}, err
}
}
var hostConfigMap corev1.ConfigMap
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: syncedConfigMap.Name, Namespace: syncedConfigMap.Namespace}, &hostConfigMap); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the ConfigMap for the first time on the host cluster")
return reconcile.Result{}, c.HostClient.Create(ctx, syncedConfigMap)
}
return reconcile.Result{}, err
}
// TODO: Add option to keep labels/annotations set by the host cluster
log.Info("updating ConfigMap on the host cluster")
return reconcile.Result{}, c.HostClient.Update(ctx, syncedConfigMap)
}
// translateConfigMap translates a configMap created in the virtual cluster
// into its host cluster representation
func (c *ConfigMapSyncer) translateConfigMap(configMap *corev1.ConfigMap) *corev1.ConfigMap {
hostConfigMap := configMap.DeepCopy()
c.Translator.TranslateTo(hostConfigMap)
return hostConfigMap
}
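Each of the new reconcilers embeds a `SyncerContext` whose definition is not part of this compare view; judging from the fields referenced above, it presumably looks roughly like this sketch (an assumption, not the actual declaration):

```go
// assumed shape of the shared syncer context; field names are taken
// from their usages in the reconcilers in this changeset
type SyncerContext struct {
	// VirtualClient reads and writes objects in the virtual cluster
	VirtualClient client.Client
	// HostClient reads and writes objects in the host cluster
	HostClient client.Client
	// Translator rewrites object names/metadata for the host-side copies
	Translator translate.ToHostTranslator
	// ClusterName and ClusterNamespace identify the owning Cluster resource
	ClusterName      string
	ClusterNamespace string
}
```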

View File

@@ -0,0 +1,243 @@
package syncer_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var ConfigMapTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
ConfigMaps: v1beta1.ConfigMapSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddConfigMapSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a ConfigMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
Expect(hostConfigMap.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostConfigMap.Labels)
})
It("updates a ConfigMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
Expect(hostConfigMap.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(configMap)
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
configMap.Labels = map[string]string{"foo": "bar"}
// update virtual configmap
err = virtTestEnv.k8sClient.Update(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
Expect(configMap.Labels).To(ContainElement("bar"))
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
// check hostConfigMap
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
Expect(err).NotTo(HaveOccurred())
return hostConfigMap.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a configMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
err = virtTestEnv.k8sClient.Delete(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not sync a configMap if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.ConfigMaps.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err = virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
GinkgoWriter.Printf("error: %v", err)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}
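These specs call a `translateName` helper that is not shown in this compare view; it is presumably a thin test-side wrapper over the kubelet's translator, along these lines (a sketch under that assumption):

```go
// hypothetical test helper mirroring the kubelet's host-side name translation
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
	translator := translate.ToHostTranslator{
		ClusterName:      cluster.Name,
		ClusterNamespace: cluster.Namespace,
	}

	return translator.TranslateName(namespace, name)
}
```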

View File

@@ -0,0 +1,161 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
ingressControllerName = "ingress-syncer-controller"
ingressFinalizerName = "ingress.k3k.io/finalizer"
)
type IngressReconciler struct {
*SyncerContext
}
// AddIngressSyncer adds ingress syncer controller to the manager of the virtual cluster
func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := IngressReconciler{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, ingressControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&networkingv1.Ingress{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for ingressConfig
syncConfig := cluster.Spec.Sync.Ingresses
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
log.Info("reconciling ingress object")
var (
virtIngress networkingv1.Ingress
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedIngress := r.ingress(&virtIngress)
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtIngress.DeletionTimestamp.IsZero() {
// delete the synced ingress if it exists
if err := r.HostClient.Delete(ctx, syncedIngress); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced ingress
if controllerutil.RemoveFinalizer(&virtIngress, ingressFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtIngress, ingressFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
return reconcile.Result{}, err
}
}
// create or update the ingress on host
var hostIngress networkingv1.Ingress
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedIngress.Name, Namespace: r.ClusterNamespace}, &hostIngress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the ingress for the first time on the host cluster")
return reconcile.Result{}, r.HostClient.Create(ctx, syncedIngress)
}
return reconcile.Result{}, err
}
log.Info("updating ingress on the host cluster")
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
}
func (r *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
hostIngress := obj.DeepCopy()
r.Translator.TranslateTo(hostIngress)
for _, rule := range hostIngress.Spec.Rules {
// modify services in rules to point to the synced services
if rule.HTTP != nil {
for _, path := range rule.HTTP.Paths {
if path.Backend.Service != nil {
path.Backend.Service.Name = r.Translator.TranslateName(obj.GetNamespace(), path.Backend.Service.Name)
}
}
}
}
// don't sync finalizers to the host
return hostIngress
}
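The enabled/selector predicate above is repeated almost verbatim in the ConfigMap, Ingress, and PVC syncers; factored out, a shared helper could look like this sketch (hypothetical, not part of this changeset, using the same labels/client packages already imported):

```go
// syncFilter is a hypothetical shared predicate: when syncing is disabled,
// only deletions pass (so finalizers can still be cleaned up); otherwise an
// empty selector matches everything
func syncFilter(enabled bool, selector map[string]string, obj ctrlruntimeclient.Object) bool {
	if !enabled {
		return obj.GetDeletionTimestamp() != nil
	}

	labelSelector := labels.SelectorFromSet(selector)
	if labelSelector.Empty() {
		return true
	}

	return labelSelector.Matches(labels.Set(obj.GetLabels()))
}
```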

View File

@@ -0,0 +1,349 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var IngressTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Ingresses: v1beta1.IngressSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
GinkgoWriter.Printf("labels: %v\n", hostIngress.Labels)
})
It("updates a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
key := client.ObjectKeyFromObject(ingress)
err = virtTestEnv.k8sClient.Get(ctx, key, ingress)
Expect(err).NotTo(HaveOccurred())
ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "test-service-updated"
// update virtual ingress
err = virtTestEnv.k8sClient.Update(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
// check hostIngress
Eventually(func() string {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
Expect(err).NotTo(HaveOccurred())
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(Equal(translateName(cluster, ingress.Namespace, "test-service-updated")))
})
It("deletes a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
err = virtTestEnv.k8sClient.Delete(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not sync an Ingress if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Ingresses.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,138 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
pvcControllerName = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
)
type PVCReconciler struct {
*SyncerContext
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := PVCReconciler{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, pvcControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&v1.PersistentVolumeClaim{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for pvc config
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC if it exists
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced pvc
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create the pvc on host
log.Info("creating the persistent volume claim for the first time on the host cluster")
// note that we don't need to update the PVC on the host cluster; we only sync it so that it can be
// handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translator.TranslateTo(hostPVC)
return hostPVC
}
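All of the Add* registration functions in this package share a single signature, so the kubelet can presumably wire them up in one loop during startup; a sketch of such wiring, with manager construction elided and the surrounding variable names assumed:

```go
// register every resource syncer on the virtual-cluster manager; virtMgr,
// hostMgr, clusterName, and clusterNamespace come from the kubelet's setup
adders := []func(context.Context, manager.Manager, manager.Manager, string, string) error{
	syncer.AddConfigMapSyncer,
	syncer.AddIngressSyncer,
	syncer.AddPVCSyncer,
	syncer.AddPodPVCController,
}

for _, add := range adders {
	if err := add(ctx, virtMgr, hostMgr, clusterName, clusterNamespace); err != nil {
		return fmt.Errorf("registering syncer: %w", err)
	}
}
```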

View File

@@ -0,0 +1,111 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var PVCTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a pvc on the host cluster", func() {
ctx := context.Background()
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: ptr.To("test-sc"),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadOnlyMany,
},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse("1G"),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, pvc)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
var hostPVC v1.PersistentVolumeClaim
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPVCName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName))
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
})
}

View File

@@ -1,12 +1,12 @@
package controller
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
@@ -15,64 +15,70 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
podController = "pod-pvc-controller"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
podControllerName = "pod-pvc-controller"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
)
type PodReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
*SyncerContext
}
// AddPodPVCController adds pod controller to k3k-kubelet
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PodReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(podController).
Named(name).
For(&v1.Pod{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for pvc config
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
// If PVC syncing is disabled, only process deletions to allow for cleanup.
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
}
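Review note: predicate.NewPredicateFuncs lifts this single object-based rule to all four event types, which is why the pass-deletions-through behavior applies uniformly to creates, updates, and deletes. A minimal, self-contained sketch of the same rule; the syncEnabled flag stands in for the cluster's Sync.PersistentVolumeClaims.Enabled field:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func main() {
	syncEnabled := false // stand-in for cluster.Spec.Sync.PersistentVolumeClaims.Enabled

	// Same shape as filterResources: one rule applied to every event type.
	p := predicate.NewPredicateFuncs(func(object client.Object) bool {
		return syncEnabled || object.GetDeletionTimestamp() != nil
	})

	now := metav1.Now()
	live := &corev1.Pod{}
	terminating := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &now}}

	fmt.Println(p.Create(event.CreateEvent{Object: live}))           // false: syncing disabled, event dropped
	fmt.Println(p.Update(event.UpdateEvent{ObjectNew: terminating})) // true: deletion still processed for cleanup
}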
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPod v1.Pod
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -103,14 +109,14 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
Namespace: pod.Namespace,
}
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
log.Info("Creating pseudo Persistent Volume")
pv := r.pseudoPV(&pvc)
if err := r.virtualClient.Create(ctx, pv); err != nil {
if err := r.VirtualClient.Create(ctx, pv); err != nil {
return ctrlruntimeclient.IgnoreAlreadyExists(err)
}
@@ -119,7 +125,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
Phase: v1.VolumeBound,
}
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
return err
}
@@ -135,7 +141,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
pvcPatch.Status.Phase = v1.ClaimBound
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
return r.virtualClient.Status().Update(ctx, pvcPatch)
return r.VirtualClient.Status().Update(ctx, pvcPatch)
}
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {

View File

@@ -1,4 +1,4 @@
package controller_test
package syncer_test
import (
"context"
@@ -12,9 +12,8 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -23,7 +22,7 @@ import (
var PriorityClassTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -37,16 +36,23 @@ var PriorityClassTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PriorityClasses: v1beta1.PriorityClassSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = controller.AddPriorityClassReconciler(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
err = syncer.AddPriorityClassSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
@@ -215,15 +221,36 @@ var PriorityClassTests = func() {
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
Expect(hostPriorityClass.Annotations[controller.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
Expect(hostPriorityClass.Annotations[syncer.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
})
It("will not create a priorityClass on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.PriorityClasses.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err = virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
return translator.TranslateName(namespace, name)
}

View File

@@ -1,10 +1,10 @@
package controller
package syncer
import (
"context"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -18,7 +18,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -28,42 +28,32 @@ const (
priorityClassFinalizerName = "priorityclass.k3k.io/finalizer"
)
type PriorityClassReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
type PriorityClassSyncer struct {
*SyncerContext
}
// AddPriorityClassReconciler adds a PriorityClass reconciler to k3k-kubelet
func AddPriorityClassReconciler(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// AddPriorityClassSyncer adds a PriorityClass syncer to the k3k-kubelet
func AddPriorityClassSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
// initialize a new Reconciler
reconciler := PriorityClassReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
reconciler := PriorityClassSyncer{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := translator.TranslateName("", priorityClassControllerName)
name := reconciler.Translator.TranslateName(clusterNamespace, priorityClassControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&schedulingv1.PriorityClass{}).
WithEventFilter(ignoreSystemPrefixPredicate).
For(&schedulingv1.PriorityClass{}).WithEventFilter(ignoreSystemPrefixPredicate).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
@@ -83,20 +73,45 @@ var ignoreSystemPrefixPredicate = predicate.Funcs{
},
}
func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for priorityClassConfig
syncConfig := cluster.Spec.Sync.PriorityClasses
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
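Review note: the selector branch is plain set-based matching, and an empty Selector map is treated as "sync everything". A standalone illustration; the map literal stands in for cluster.Spec.Sync.PriorityClasses.Selector:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Stand-in for the sync config's Selector field.
	selector := labels.SelectorFromSet(labels.Set{"sync": "enabled"})

	fmt.Println(selector.Matches(labels.Set{"sync": "enabled", "team": "a"})) // true
	fmt.Println(selector.Matches(labels.Set{"team": "a"}))                    // false

	// A nil/empty set produces an empty selector, which the filter above
	// treats as "match every object".
	fmt.Println(labels.SelectorFromSet(nil).Empty()) // true
}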
func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
priorityClass schedulingv1.PriorityClass
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -106,13 +121,13 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
if !priorityClass.DeletionTimestamp.IsZero() {
// delete the synced priorityClass if it exists
// TODO add test for previous implementation without err != nil check, and also check the other controllers
if err := r.hostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
if err := r.HostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced priorityClass
if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
@@ -122,7 +137,7 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
@@ -130,19 +145,19 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
// create the priorityClass on the host
log.Info("creating the priorityClass for the first time on the host cluster")
err := r.hostClient.Create(ctx, hostPriorityClass)
err := r.HostClient.Create(ctx, hostPriorityClass)
if err != nil {
if !apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, err
}
return reconcile.Result{}, r.hostClient.Update(ctx, hostPriorityClass)
return reconcile.Result{}, r.HostClient.Update(ctx, hostPriorityClass)
}
return reconcile.Result{}, nil
}
func (r *PriorityClassReconciler) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
func (r *PriorityClassSyncer) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
hostPriorityClass := priorityClass.DeepCopy()
r.Translator.TranslateTo(hostPriorityClass)

View File

@@ -0,0 +1,155 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
secretControllerName = "secret-syncer"
secretFinalizerName = "secret.k3k.io/finalizer"
)
type SecretSyncer struct {
// SyncerContext carries the host and virtual cluster clients shared by all syncers
*SyncerContext
}
func (s *SecretSyncer) Name() string {
return secretControllerName
}
// AddSecretSyncer adds secret syncer controller to the manager of the virtual cluster
func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := SecretSyncer{
SyncerContext: &SyncerContext{
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, secretControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&v1.Secret{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *SecretSyncer) filterResources(object client.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for Secrets Sync Config
syncConfig := cluster.Spec.Sync.Secrets
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
// Reconcile implements reconcile.Reconciler and synchronizes the virtual cluster Secret to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterName)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1beta1.Cluster
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
var virtualSecret v1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtualSecret); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
syncedSecret := s.translateSecret(&virtualSecret)
// handle deletion
if !virtualSecret.DeletionTimestamp.IsZero() {
// delete the synced secret if it exists
if err := s.HostClient.Delete(ctx, syncedSecret); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced secret
if controllerutil.RemoveFinalizer(&virtualSecret, secretFinalizerName) {
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtualSecret, secretFinalizerName) {
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
return reconcile.Result{}, err
}
}
var hostSecret v1.Secret
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: syncedSecret.Name, Namespace: syncedSecret.Namespace}, &hostSecret); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the Secret for the first time on the host cluster")
return reconcile.Result{}, s.HostClient.Create(ctx, syncedSecret)
}
return reconcile.Result{}, err
}
// TODO: Add option to keep labels/annotation set by the host cluster
log.Info("updating Secret on the host cluster")
return reconcile.Result{}, s.HostClient.Update(ctx, syncedSecret)
}
// translateSecret copies a Secret created in the virtual cluster and
// translates it into its host cluster counterpart
func (s *SecretSyncer) translateSecret(secret *v1.Secret) *v1.Secret {
hostSecret := secret.DeepCopy()
if hostSecret.Type == v1.SecretTypeServiceAccountToken {
hostSecret.Type = v1.SecretTypeOpaque
}
s.Translator.TranslateTo(hostSecret)
return hostSecret
}
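Review note: the translator is what keeps mirrored objects from colliding on the host — names are derived from the cluster identity plus the virtual namespace and name. A sketch of the call; the exact string TranslateName produces is internal to the translate package, so no literal output is assumed here:

package main

import (
	"fmt"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

func main() {
	t := translate.ToHostTranslator{
		ClusterName:      "my-cluster",
		ClusterNamespace: "k3k-my-cluster",
	}

	// Deterministic per (cluster, virtual namespace, name); the same call
	// the syncers use to locate the mirrored object on the host.
	fmt.Println(t.TranslateName("default", "my-secret"))
}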

View File

@@ -0,0 +1,240 @@
package syncer_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var SecretTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Secrets: v1beta1.SecretSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a Secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
Expect(hostSecret.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostSecret.Labels)
})
It("updates a Secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
Expect(hostSecret.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(secret)
err = virtTestEnv.k8sClient.Get(ctx, key, secret)
Expect(err).NotTo(HaveOccurred())
secret.Labels = map[string]string{"foo": "bar"}
// update virtual secret
err = virtTestEnv.k8sClient.Update(ctx, secret)
Expect(err).NotTo(HaveOccurred())
Expect(secret.Labels).To(ContainElement("bar"))
// check hostSecret
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
Expect(err).NotTo(HaveOccurred())
return hostSecret.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
err = virtTestEnv.k8sClient.Delete(ctx, secret)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not create a secret on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Secrets.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err = virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}
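One caveat on the disabled-sync test: Eventually(... IsNotFound ...) can succeed on its very first poll, before the syncer has had any chance to act. If the intent is to assert the Secret never appears on the host, Gomega's Consistently states that directly; a sketch reusing the test's variables:

// Fails if the Secret shows up at any point during the window, which is
// the stronger assertion for "will not create".
Consistently(func() bool {
	key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).
	WithTimeout(time.Second * 3).
	Should(BeTrue())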

View File

@@ -1,12 +1,13 @@
package controller
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
@@ -15,23 +16,16 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
serviceSyncerController = "service-syncer-controller"
serviceFinalizerName = "service.k3k.io/finalizer"
serviceControllerName = "service-syncer-controller"
serviceFinalizerName = "service.k3k.io/finalizer"
)
type ServiceReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
*SyncerContext
}
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
@@ -42,24 +36,25 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
}
reconciler := ServiceReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translator,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, serviceControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(serviceSyncerController).
For(&v1.Service{}).
Named(name).
For(&v1.Service{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
@@ -68,34 +63,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtService.DeletionTimestamp.IsZero() {
// delete the synced service if it exists
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
if err := r.HostClient.Delete(ctx, syncedService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// remove the finalizer after cleaning up the synced service
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
if controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
@@ -104,20 +97,18 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// Add finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
if controllerutil.AddFinalizer(&virtService, serviceFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
// create or update the service on host
var hostService v1.Service
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.ClusterNamespace}, &hostService); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
return reconcile.Result{}, r.HostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
@@ -125,7 +116,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("updating service on the host cluster")
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
return reconcile.Result{}, r.HostClient.Update(ctx, syncedService)
}
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for serviceSyncConfig
syncConfig := cluster.Spec.Sync.Services
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
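Review note: this change also moves the finalizer handling onto controllerutil's boolean returns — AddFinalizer and RemoveFinalizer report whether they actually mutated the object, so the separate ContainsFinalizer check and the redundant Update on no-op paths disappear. A minimal sketch of the idiom:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	svc := &corev1.Service{}
	const finalizer = "service.k3k.io/finalizer"

	// true only when the entry was actually added: update once, skip after.
	fmt.Println(controllerutil.AddFinalizer(svc, finalizer)) // true
	fmt.Println(controllerutil.AddFinalizer(svc, finalizer)) // false: already present

	// RemoveFinalizer mirrors that on the deletion path.
	fmt.Println(controllerutil.RemoveFinalizer(svc, finalizer)) // true
	fmt.Println(controllerutil.RemoveFinalizer(svc, finalizer)) // false: nothing left to remove
}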
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {

View File

@@ -0,0 +1,276 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var ServiceTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Services: v1beta1.ServiceSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddServiceSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
GinkgoWriter.Printf("labels: %v\n", hostService.Labels)
})
It("updates a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
key := client.ObjectKeyFromObject(service)
err = virtTestEnv.k8sClient.Get(ctx, key, service)
Expect(err).NotTo(HaveOccurred())
service.Spec.Ports[0].Name = "test-port-updated"
// update virtual service
err = virtTestEnv.k8sClient.Update(ctx, service)
Expect(err).NotTo(HaveOccurred())
// check hostService
Eventually(func() string {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
Expect(err).NotTo(HaveOccurred())
return hostService.Spec.Ports[0].Name
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(Equal("test-port-updated"))
})
It("deletes a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
err = virtTestEnv.k8sClient.Delete(ctx, service)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not create a service on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Services.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,15 @@
package syncer
import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
type SyncerContext struct {
ClusterName string
ClusterNamespace string
VirtualClient client.Client
HostClient client.Client
Translator translate.ToHostTranslator
}
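Every syncer in this change reduces to this shared context plus a Reconcile and a filter. A sketch of the embedding pattern for some future syncer — ExampleSyncer is illustrative only, not part of the change:

package syncer

import (
	"sigs.k8s.io/controller-runtime/pkg/manager"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

// ExampleSyncer is hypothetical; it shows the construction every concrete
// syncer in this change follows.
type ExampleSyncer struct {
	*SyncerContext
}

func newExampleSyncer(virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) *ExampleSyncer {
	return &ExampleSyncer{
		SyncerContext: &SyncerContext{
			ClusterName:      clusterName,
			ClusterNamespace: clusterNamespace,
			VirtualClient:    virtMgr.GetClient(),
			HostClient:       hostMgr.GetClient(),
			Translator: translate.ToHostTranslator{
				ClusterName:      clusterName,
				ClusterNamespace: clusterNamespace,
			},
		},
	}
}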

View File

@@ -1,4 +1,4 @@
package controller_test
package syncer_test
import (
"context"
@@ -19,7 +19,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -91,7 +92,7 @@ func NewTestEnv() *TestEnv {
By("bootstrapping test environment")
testEnv := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "charts", "k3k", "crds")},
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
BinaryAssetsDirectory: tempDir,
Scheme: buildScheme(),
@@ -118,7 +119,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
@@ -165,5 +166,19 @@ var _ = Describe("Kubelet Controller", func() {
cancel()
})
Describe("PriorityClass", PriorityClassTests)
Describe("PriorityClass Syncer", PriorityClassTests)
Describe("ConfigMap Syncer", ConfigMapTests)
Describe("Secret Syncer", SecretTests)
Describe("Service Syncer", ServiceTests)
Describe("Ingress Syncer", IngressTests)
Describe("PersistentVolumeClaim Syncer", PVCTests)
})
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
return translator.TranslateName(namespace, name)
}

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
@@ -20,11 +21,10 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
)
const (
webhookName = "podmutator.k3k.io"
webhookName = "podmutating.k3k.io"
webhookTimeout = int32(10)
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
@@ -36,14 +36,14 @@ type webhookHandler struct {
serviceName string
clusterName string
clusterNamespace string
logger *log.Logger
logger logr.Logger
webhookPort int
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
// set the nodeName of created pods to the virtual kubelet node's name,
// and to strip status.* downward API references from the pods' env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -54,7 +54,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
webhookPort: webhookPort,
}
// create mutator webhook configuration to the cluster
// create the mutating webhook configuration in the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
@@ -75,7 +75,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
@@ -100,7 +100,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
w.logger.Info("extracting webhook tls from host cluster")
var webhookTLSSecret v1.Secret

View File

@@ -11,11 +11,11 @@ import (
"os"
"time"
"github.com/go-logr/zapr"
"github.com/go-logr/logr"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/log/klogv2"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
@@ -23,6 +23,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -35,29 +36,25 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
)
var (
baseScheme = runtime.NewScheme()
k3kKubeletName = "k3k-kubelet"
)
var baseScheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(baseScheme)
_ = v1alpha1.AddToScheme(baseScheme)
_ = v1beta1.AddToScheme(baseScheme)
}
type kubelet struct {
virtualCluster v1alpha1.Cluster
virtualCluster v1beta1.Cluster
name string
port int
@@ -70,11 +67,11 @@ type kubelet struct {
hostMgr manager.Manager
virtualMgr manager.Manager
node *nodeutil.Node
logger *k3klog.Logger
logger logr.Logger
token string
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
@@ -97,7 +94,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
ctrl.SetLogger(logger)
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
@@ -150,34 +147,14 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
logger.Info("adding pod mutating webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
}
logger.Info("adding priorityclass controller")
if err := k3kkubeletcontroller.AddPriorityClassReconciler(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add priorityclass controller: " + err.Error())
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
return nil, errors.New("failed to add controller: " + err.Error())
}
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
@@ -193,7 +170,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
}
var virtualCluster v1alpha1.Cluster
var virtualCluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
}
@@ -209,7 +186,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
hostMgr: hostMgr,
virtualMgr: virtualMgr,
agentIP: clusterIP,
logger: logger.Named(k3kKubeletName),
logger: logger,
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
port: c.KubeletPort,
@@ -231,9 +208,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
func (k *kubelet) registerNode(agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
@@ -251,34 +228,36 @@ func (k *kubelet) start(ctx context.Context) {
go func() {
err := k.hostMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("host manager stopped", zap.Error(err))
k.logger.Error(err, "host manager stopped")
}
}()
go func() {
err := k.virtualMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
k.logger.Error(err, "virtual manager stopped")
}
}()
// run the node async so that we can wait for it to be ready in another call
go func() {
ctx = log.WithLogger(ctx, k.logger)
klog.SetLogger(k.logger)
ctx = log.WithLogger(ctx, klogv2.New(nil))
if err := k.node.Run(ctx); err != nil {
k.logger.Fatalw("node errored when running", zap.Error(err))
k.logger.Error(err, "node errored when running")
}
}()
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
k.logger.Error(err, "node was not ready within timeout of 1 minute")
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
k.logger.Error(err, "node stopped with an error")
}
k.logger.Info("node exited successfully")
@@ -297,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
@@ -308,7 +287,7 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
@@ -319,12 +298,12 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
@@ -338,7 +317,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Infow("decoded bootstrap", zap.Error(err))
logger.Error(err, "decoded bootstrap")
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -389,17 +368,10 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
return clientcmd.Write(*config)
}
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var (
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
@@ -447,3 +419,56 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
Certificates: []tls.Certificate{clientCert},
}, nil
}
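Review note: the bootstrap fetch above goes through client-go's retry.OnError; with a retriable predicate of err != nil it simply retries any failure until controller.Backoff is exhausted. A standalone sketch of the same shape with a local backoff:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	backoff := wait.Backoff{Steps: 4, Duration: 10 * time.Millisecond, Factor: 2}

	err := retry.OnError(backoff,
		func(err error) bool { return err != nil }, // retry on any error
		func() error {
			attempts++
			if attempts < 3 {
				return errors.New("not ready yet")
			}
			return nil
		})

	fmt.Println(attempts, err) // 3 <nil>
}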
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
var cluster v1beta1.Cluster
objKey := types.NamespacedName{
Namespace: c.ClusterNamespace,
Name: c.ClusterName,
}
if err := hostClient.Get(ctx, objKey, &cluster); err != nil {
return err
}
logger.Info("adding configmap syncer controller")
if err := syncer.AddConfigMapSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add configmap syncer controller: " + err.Error())
}
logger.Info("adding secret syncer controller")
if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add secret syncer controller: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := syncer.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding ingress syncer controller")
if err := syncer.AddIngressSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add ingress syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := syncer.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add pod pvc controller: " + err.Error())
}
logger.Info("adding priorityclass controller")
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add priorityclass controller: " + err.Error())
}
return nil
}
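A side note on the error construction here: errors.New("...: " + err.Error()) flattens the chain, so callers lose errors.Is/errors.As on the underlying cause. fmt.Errorf with %w keeps the wrap at the same cost; a sketch of one call site:

if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
	return fmt.Errorf("failed to add secret syncer controller: %w", err)
}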

View File

@@ -7,12 +7,12 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -22,8 +22,9 @@ import (
var (
configFile string
cfg config
logger *log.Logger
logger logr.Logger
debug bool
logFormat string
)
func main() {
@@ -34,13 +35,16 @@ func main() {
if err := InitializeConfig(cmd); err != nil {
return err
}
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrlruntimelog.SetLogger(logger)
return nil
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
@@ -53,7 +57,6 @@ func main() {
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
if err := rootCmd.Execute(); err != nil {
@@ -73,7 +76,7 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
if err := k.registerNode(k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}
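On the logger wiring earlier in this file: zapr is the adapter that turns a *zap.Logger — which the k3k log.New(debug, logFormat) helper evidently returns, given its use with zapr.NewLogger — into the logr.Logger the rest of the code now expects. A minimal equivalent with plain zap, in case the helper's internals are unfamiliar:

package main

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
)

func main() {
	zl, err := zap.NewProduction() // JSON encoder; zap.NewDevelopment() for console
	if err != nil {
		panic(err)
	}

	logger := zapr.NewLogger(zl)
	logger.Info("kubelet starting", "logFormat", "json")
}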

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -12,16 +13,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Fatal("error getting host node for mirroring", err)
logger.Error(err, "error getting host node for mirroring", err)
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -56,7 +56,7 @@ func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, se
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
logger.Error(err, "error updating node capacity")
}
}
}()

View File

@@ -12,13 +12,13 @@ import (
"strings"
"time"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
@@ -38,13 +38,11 @@ import (
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -53,22 +51,22 @@ var _ nodeutil.Provider = (*Provider)(nil)
// Provider implements nodeutil.Provider from Virtual Kubelet.
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
type Provider struct {
Handler controller.ControllerHandler
Translator translate.ToHostTranslator
HostClient client.Client
VirtualClient client.Client
VirtualManager manager.Manager
ClientConfig rest.Config
CoreClient cv1.CoreV1Interface
ClusterNamespace string
ClusterName string
serverIP string
dnsIP string
logger *k3klog.Logger
logger logr.Logger
}
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
@@ -80,16 +78,9 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3
}
p := Provider{
Handler: controller.ControllerHandler{
Mgr: virtualMgr,
Scheme: *virtualMgr.GetScheme(),
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
Translator: translator,
Logger: logger,
},
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
VirtualManager: virtualMgr,
Translator: translator,
ClientConfig: hostConfig,
CoreClient: coreClient,
@@ -134,7 +125,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
return closer, err
}
@@ -208,7 +199,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
p.logger.V(1).Info("GetStatsSummary")
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
@@ -340,7 +331,14 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
// createPod takes a Kubernetes Pod and deploys it within the provider.
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod := pod.DeepCopy()
// fieldPath envs are not translated correctly by the virtual kubelet pod controller;
// as a workaround, fetch the pod from the virtual cluster and copy over the envSource
var sourcePod corev1.Pod
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
return err
}
tPod := sourcePod.DeepCopy()
p.Translator.TranslateTo(tPod)
// get Cluster definition
@@ -349,7 +347,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
@@ -385,12 +383,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}
// fieldpath annotations
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
// host cluster version
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// sync serviceaccount token to the host cluster
@@ -398,10 +396,14 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Infow("creating pod",
p.logger.Info("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
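
The re-fetch above exists because fieldRef-based env vars resolve against the pod's own metadata, which changes once the pod is translated to the host. An illustration of the kind of env var involved (illustrative only, not k3k code):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// An env var sourced from the pod's own metadata. On the host the pod
	// has a translated name and namespace, so this would resolve to the
	// host identity unless configureFieldPathEnv rewrites the field path
	// from the recorded annotations. Illustrative example.
	env := corev1.EnvVar{
		Name: "POD_NAME",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
		},
	}
	fmt.Println(env.ValueFrom.FieldRef.FieldPath)
}
```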
@@ -443,58 +445,22 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
// if one/more volumes couldn't be transformed
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
for _, volume := range volumes {
var optional bool
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
if volume.ConfigMap != nil {
if volume.ConfigMap.Optional != nil {
optional = *volume.ConfigMap.Optional
}
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
}
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
} else if volume.Secret != nil {
if volume.Secret.Optional != nil {
optional = *volume.Secret.Optional
}
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
}
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
if source.ConfigMap.Optional != nil {
optional = *source.ConfigMap.Optional
}
configMapName := source.ConfigMap.Name
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
}
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
} else if source.Secret != nil {
if source.Secret.Optional != nil {
optional = *source.Secret.Optional
}
secretName := source.Secret.Name
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
}
source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
}
}
} else if volume.PersistentVolumeClaim != nil {
@@ -517,64 +483,13 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
return nil
}
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
var configMap corev1.ConfigMap
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: configMapName,
}
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
// check if its optional configmap
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
var secret corev1.Secret
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: secretName,
}
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
if err := p.Handler.AddResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// UpdatePod executes updatePod with retry
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.updatePod, pod)
}
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Debugw("got a request for update pod")
p.logger.V(1).Info("got a request for update pod")
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
@@ -604,13 +519,18 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
p.logger.Error(err, "error when updating ephemeral containers")
return err
}
return nil
}
// fieldpath annotations
if err := p.configureFieldPathEnv(&currentVirtualPod, &currentHostPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -670,85 +590,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Infof("Got request to delete pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
// note that we don't return an error here. The pod was successfully deleted, another process
// should clean this without affecting the user
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
}
p.logger.Infof("Deleted pod %s", pod.Name)
return nil
}
// pruneUnusedVolumes removes volumes in use by pod that aren't used by any other pods
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
// since this pod was removed, originally mark all of the secrets/configmaps it uses as eligible
// for pruning
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
var pods corev1.PodList
// only pods in the same namespace could be using secrets/configmaps that this pod is using
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
Namespace: pod.Namespace,
})
if err != nil {
return fmt.Errorf("unable to list pods: %w", err)
}
for _, vPod := range pods.Items {
if vPod.Name == pod.Name {
continue
}
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
pruneSecrets.Delete(secrets...)
pruneConfigMap.Delete(configMaps...)
}
for _, secretName := range pruneSecrets.UnsortedList() {
var secret corev1.Secret
key := types.NamespacedName{
Name: secretName,
Namespace: pod.Namespace,
}
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
}
for _, configMapName := range pruneConfigMap.UnsortedList() {
var configMap corev1.ConfigMap
key := types.NamespacedName{
Name: configMapName,
Namespace: pod.Namespace,
}
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
}
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
@@ -758,7 +613,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
@@ -780,14 +635,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
}
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
return pod.Status.DeepCopy(), nil
}
@@ -869,22 +724,22 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
// inject networking information to the pod's environment variables
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
pod.Spec.Containers[i].Env = mergeEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
pod.Spec.InitContainers[i].Env = mergeEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
}
// handle ephemeral containers as well
for i := range pod.Spec.EphemeralContainers {
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
pod.Spec.EphemeralContainers[i].Env = mergeEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
}
}
// overrideEnvVars will override the orig environment variables if found in the updated list
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
// mergeEnvVars overrides the orig environment variables found in the updated list, and appends any updated variables not already present
func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
if len(updated) == 0 {
return orig
}
@@ -895,43 +750,23 @@ func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
}
for i, origEnvVar := range orig {
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
orig[i] = updatedEnvVar
for i, env := range orig {
if updatedEnv, ok := updatedEnvVarMap[env.Name]; ok {
orig[i] = updatedEnv
// Remove the updated variable from the map
delete(updatedEnvVarMap, env.Name)
}
}
// Any variables remaining in the map are new and should be appended to the original slice.
for _, env := range updatedEnvVarMap {
orig = append(orig, env)
}
return orig
}
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for removing/seeing which virtual cluster resources need to be in the host cluster.
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
var (
secrets []string
configMaps []string
)
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil {
secrets = append(secrets, volume.Secret.SecretName)
} else if volume.ConfigMap != nil {
configMaps = append(configMaps, volume.ConfigMap.Name)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
configMaps = append(configMaps, source.ConfigMap.Name)
} else if source.Secret != nil {
secrets = append(secrets, source.Secret.Name)
}
}
}
}
return secrets, configMaps
}
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
// to assign env fieldpaths to pods; it also makes sure to change metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {

View File

@@ -7,7 +7,7 @@ import (
corev1 "k8s.io/api/core/v1"
)
func Test_overrideEnvVars(t *testing.T) {
func Test_mergeEnvVars(t *testing.T) {
type args struct {
orig []corev1.EnvVar
new []corev1.EnvVar
@@ -32,7 +32,7 @@ func Test_overrideEnvVars(t *testing.T) {
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []corev1.EnvVar{},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig has a matching element",
@@ -56,14 +56,14 @@ func Test_overrideEnvVars(t *testing.T) {
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
if got := mergeEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("mergeEnvVars() = %v, want %v", got, tt.want)
}
})
}

View File

@@ -23,7 +23,7 @@ const (
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access volume is already removed from the pod
// this is needed in case users already add their own custom tokens, as in Rancher imported clusters

View File

@@ -4,8 +4,10 @@ import (
"encoding/hex"
"strings"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -34,6 +36,13 @@ type ToHostTranslator struct {
ClusterNamespace string
}
func NewHostTranslator(cluster *v1beta1.Cluster) *ToHostTranslator {
return &ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
}
// Translate translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
@@ -125,3 +134,11 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
return controller.SafeConcatName(namePrefix, nameSuffix)
}
// NamespacedName returns the types.NamespacedName of the resource in the host cluster
func (t *ToHostTranslator) NamespacedName(obj client.Object) types.NamespacedName {
return types.NamespacedName{
Namespace: t.ClusterNamespace,
Name: t.TranslateName(obj.GetNamespace(), obj.GetName()),
}
}
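
The new NamespacedName helper pairs naturally with TranslateName for host-side lookups. A short usage sketch resolving the host copy of a virtual pod:

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

// getHostPod resolves the host-cluster copy of a virtual pod via the
// translator's NamespacedName helper. A usage sketch.
func getHostPod(ctx context.Context, hostClient client.Client, t *translate.ToHostTranslator, virtualPod *corev1.Pod) (*corev1.Pod, error) {
	var hostPod corev1.Pod
	if err := hostClient.Get(ctx, t.NamespacedName(virtualPod), &hostPod); err != nil {
		return nil, fmt.Errorf("unable to get host pod: %w", err)
	}

	return &hostPod, nil
}
```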

main.go
View File

@@ -5,10 +5,13 @@ import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/spf13/cobra"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -19,7 +22,7 @@ import (
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -28,23 +31,20 @@ import (
)
var (
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
k3SImage string
k3SImagePullPolicy string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logger *log.Logger
scheme = runtime.NewScheme()
config cluster.Config
kubeconfig string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logFormat string
logger logr.Logger
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
}
func main() {
@@ -57,31 +57,38 @@ func main() {
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = log.New(debug)
logger = zapr.NewLogger(log.New(debug, logFormat))
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&clusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&sharedAgentImage, "shared-agent-image", "", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&sharedAgentImagePullPolicy, "shared-agent-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "agent-shared-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&k3SImage, "k3s-image", "rancher/k3k", "K3K server image")
rootCmd.PersistentFlags().StringVar(&k3SImagePullPolicy, "k3s-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for for servers")
rootCmd.PersistentFlags().StringSliceVar(&config.AgentImagePullSecrets, "agent-image-pull-secret", nil, "Image pull secret used for for agents")
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
if err := rootCmd.Execute(); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
logger.Error(err, "failed to run k3k controller")
}
}
func run(cmd *cobra.Command, args []string) error {
ctx := context.Background()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
ctrlruntimelog.SetLogger(logger)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
@@ -95,8 +102,6 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
@@ -109,34 +114,48 @@ func run(cmd *cobra.Command, args []string) error {
return err
}
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
logger.Info("adding statefulset controller")
if err := cluster.AddStatefulSetController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add statefulset controller: %v", err)
}
logger.Info("adding service controller")
if err := cluster.AddServiceController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add service controller: %v", err)
}
logger.Info("adding pod controller")
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
return fmt.Errorf("failed to add pod controller: %v", err)
}
logger.Info("adding clusterpolicy controller")
if err := policy.Add(mgr, clusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add clusterpolicy controller: %v", err)
}
if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("failed to start the manager: %v", err)
return fmt.Errorf("failed to start manager: %v", err)
}
logger.Info("controller manager stopped")
return nil
}
func validate() error {
if sharedAgentImagePullPolicy != "" {
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
sharedAgentImagePullPolicy != string(v1.PullNever) {
if config.SharedAgentImagePullPolicy != "" {
if config.SharedAgentImagePullPolicy != string(v1.PullAlways) &&
config.SharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
config.SharedAgentImagePullPolicy != string(v1.PullNever) {
return errors.New("invalid value for shared agent image policy")
}
}
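
The three explicit comparisons could be expressed more compactly; an equivalent sketch using the standard library's slices package (Go 1.21+), shown here only as an alternative form:

```go
package example

import (
	"errors"
	"slices"

	v1 "k8s.io/api/core/v1"
)

// validatePullPolicy is a more compact equivalent of the check above;
// a sketch, not the k3k code.
func validatePullPolicy(policy string) error {
	// An empty policy means "let Kubernetes pick the default".
	if policy == "" {
		return nil
	}

	valid := []string{
		string(v1.PullAlways),
		string(v1.PullIfNotPresent),
		string(v1.PullNever),
	}
	if !slices.Contains(valid, policy) {
		return errors.New("invalid value for shared agent image policy")
	}

	return nil
}
```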

View File

@@ -1,3 +1,3 @@
// +k8s:deepcopy-gen=package
// +groupName=k3k.io
package v1alpha1
package v1beta1

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
@@ -10,7 +10,7 @@ import (
)
var (
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"}
SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemBuilder.AddToScheme
)

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
v1 "k8s.io/api/core/v1"
@@ -103,6 +103,7 @@ type ClusterSpec struct {
// Expose specifies options for exposing the API server.
// By default, it's only exposed as a ClusterIP.
//
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
// +optional
Expose *ExposeConfig `json:"expose,omitempty"`
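
The CEL rule means a spec that sets more than one expose option is rejected at admission. A sketch of an invalid combination (Go field names are inferred from the CEL paths and the IngressConfig/LoadBalancerConfig types in this diff, and may not match the API exactly):

```go
package main

import (
	"fmt"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
	// Two expose options at once: the API server's CEL validation
	// ("only one can be set") would reject a Cluster using this spec.
	// Field names here are assumptions inferred from the CEL rule.
	expose := &v1beta1.ExposeConfig{
		Ingress:      &v1beta1.IngressConfig{},
		LoadBalancer: &v1beta1.LoadBalancerConfig{},
	}
	fmt.Println(expose)
}
```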
@@ -176,6 +177,118 @@ type ClusterSpec struct {
//
// +optional
CustomCAs CustomCAs `json:"customCAs,omitempty"`
// Sync specifies the resource types that will be synced from the virtual cluster to the host cluster.
//
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// SyncConfig contains the resources that should be synced from the virtual cluster to the host cluster.
type SyncConfig struct {
// Services resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
Services ServiceSyncConfig `json:"services,omitempty"`
// ConfigMaps resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
ConfigMaps ConfigMapSyncConfig `json:"configmaps,omitempty"`
// Secrets resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
Secrets SecretSyncConfig `json:"secrets,omitempty"`
// Ingresses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
Ingresses IngressSyncConfig `json:"ingresses,omitempty"`
// PersistentVolumeClaims resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims,omitempty"`
// PriorityClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses,omitempty"`
}
// SecretSyncConfig specifies the sync options for secrets.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ServiceSyncConfig specifies the sync options for services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ConfigMapSyncConfig specifies the sync options for configmaps.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// IngressSyncConfig specifies the sync options for ingresses.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// PersistentVolumeClaimSyncConfig specifies the sync options for persistent volume claims.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// PriorityClassSyncConfig specifies the sync options for priority classes.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
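
Putting the new sync API together: a Cluster that syncs only labeled secrets and keeps ingress sync disabled might look like this (a sketch against the v1beta1 types above; the opt-in label key is an arbitrary example):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
	cluster := &v1beta1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "mycluster", Namespace: "ns-1"},
		Spec: v1beta1.ClusterSpec{
			Sync: &v1beta1.SyncConfig{
				Services:   v1beta1.ServiceSyncConfig{Enabled: true},
				ConfigMaps: v1beta1.ConfigMapSyncConfig{Enabled: true},
				// Only sync secrets carrying an opt-in label
				// ("k3k.io/sync" is an arbitrary example key).
				Secrets: v1beta1.SecretSyncConfig{
					Enabled:  true,
					Selector: map[string]string{"k3k.io/sync": "true"},
				},
				// Ingress sync stays off, matching the default.
				Ingresses: v1beta1.IngressSyncConfig{Enabled: false},
			},
		},
	}
	fmt.Println(cluster.Name)
}
```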
// ClusterMode is the possible provisioning mode of a Cluster.
@@ -230,7 +343,7 @@ type PersistenceConfig struct {
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="1G"
// +kubebuilder:default="2G"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
@@ -484,6 +597,12 @@ type VirtualClusterPolicySpec struct {
//
// +optional
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
// Sync specifies the resource types that will be synced from the virtual cluster to the host cluster.
//
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.

View File

@@ -2,7 +2,7 @@
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
"k8s.io/api/core/v1"
@@ -164,6 +164,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
}
}
out.CustomCAs = in.CustomCAs
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -203,6 +208,28 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapSyncConfig) DeepCopyInto(out *ConfigMapSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapSyncConfig.
func (in *ConfigMapSyncConfig) DeepCopy() *ConfigMapSyncConfig {
if in == nil {
return nil
}
out := new(ConfigMapSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
*out = *in
@@ -307,6 +334,28 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSyncConfig) DeepCopyInto(out *IngressSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSyncConfig.
func (in *IngressSyncConfig) DeepCopy() *IngressSyncConfig {
if in == nil {
return nil
}
out := new(IngressSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
*out = *in
@@ -377,6 +426,115 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimSyncConfig) DeepCopyInto(out *PersistentVolumeClaimSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSyncConfig.
func (in *PersistentVolumeClaimSyncConfig) DeepCopy() *PersistentVolumeClaimSyncConfig {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClassSyncConfig) DeepCopyInto(out *PriorityClassSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassSyncConfig.
func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
if in == nil {
return nil
}
out := new(PriorityClassSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSyncConfig.
func (in *SecretSyncConfig) DeepCopy() *SecretSyncConfig {
if in == nil {
return nil
}
out := new(SecretSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSyncConfig) DeepCopyInto(out *ServiceSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSyncConfig.
func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
if in == nil {
return nil
}
out := new(ServiceSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
*out = *in
in.Services.DeepCopyInto(&out.Services)
in.ConfigMaps.DeepCopyInto(&out.ConfigMaps)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Ingresses.DeepCopyInto(&out.Ingresses)
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
func (in *SyncConfig) DeepCopy() *SyncConfig {
if in == nil {
return nil
}
out := new(SyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
*out = *in
@@ -461,6 +619,11 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.

View File

@@ -11,7 +11,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -24,12 +24,12 @@ type ResourceEnsurer interface {
}
type Config struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client ctrlruntimeclient.Client
scheme *runtime.Scheme
}
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
return &Config{
cluster: cluster,
client: client,

View File

@@ -19,7 +19,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
@@ -31,23 +31,26 @@ const (
type SharedAgent struct {
*Config
serviceIP string
image string
imagePullPolicy string
token string
kubeletPort int
webhookPort int
serviceIP string
image string
imagePullPolicy string
imageRegistry string
token string
kubeletPort int
webhookPort int
imagePullSecrets []string
}
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int) *SharedAgent {
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
return &SharedAgent{
Config: config,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
kubeletPort: kubeletPort,
webhookPort: webhookPort,
Config: config,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
kubeletPort: kubeletPort,
webhookPort: webhookPort,
imagePullSecrets: imagePullSecrets,
}
}
@@ -96,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
@@ -156,7 +159,13 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
dnsPolicy = v1.DNSClusterFirstWithHostNet
}
return v1.PodSpec{
image := s.image
if s.imageRegistry != "" {
image = s.imageRegistry + "/" + s.image
}
podSpec := v1.PodSpec{
HostNetwork: hostNetwork,
DNSPolicy: dnsPolicy,
ServiceAccountName: s.Name(),
@@ -202,7 +211,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
Containers: []v1.Container{
{
Name: s.Name(),
Image: s.image,
Image: image,
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{},
@@ -254,6 +263,11 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
}
for _, imagePullSecret := range s.imagePullSecrets {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
return podSpec
}
func (s *SharedAgent) service(ctx context.Context) error {
@@ -367,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"networking.k8s.io"},
Resources: []string{"ingresses"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"k3k.io"},
Resources: []string{"clusters"},

View File

@@ -8,12 +8,12 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_sharedAgentData(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
serviceName string
ip string
kubeletPort int
@@ -29,12 +29,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "simple config",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
@@ -59,15 +59,15 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "version in status",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},
@@ -92,12 +92,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "missing version in spec",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},

View File

@@ -22,19 +22,22 @@ const (
type VirtualAgent struct {
*Config
serviceIP string
token string
k3SImage string
k3SImagePullPolicy string
serviceIP string
token string
Image string
ImagePullPolicy string
ImageRegistry string
imagePullSecrets []string
}
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
func NewVirtualAgent(config *Config, serviceIP, token, Image, ImagePullPolicy string, imagePullSecrets []string) *VirtualAgent {
return &VirtualAgent{
Config: config,
serviceIP: serviceIP,
token: token,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
Config: config,
serviceIP: serviceIP,
token: token,
Image: Image,
ImagePullPolicy: ImagePullPolicy,
imagePullSecrets: imagePullSecrets,
}
}
@@ -84,7 +87,7 @@ with-node-id: true`, serviceIP, token)
}
func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster, v.k3SImage)
image := controller.K3SImage(v.cluster, v.Image)
const name = "k3k-agent"
@@ -183,7 +186,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
{
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
ImagePullPolicy: v1.PullPolicy(v.ImagePullPolicy),
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},
@@ -243,5 +246,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
}
}
for _, imagePullSecret := range v.imagePullSecrets {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
return podSpec
}

View File

@@ -0,0 +1,35 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
v1 "k8s.io/api/core/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
// newVirtualClient creates a new Client that can be used to interact with the virtual cluster
func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) {
var clusterKubeConfig v1.Secret
kubeconfigSecretName := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"),
Namespace: clusterNamespace,
}
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
if err != nil {
return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err)
}
return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
}
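
Typical usage: build the virtual client from the host client, then query the virtual cluster directly. A short sketch:

```go
package cluster

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// listVirtualPods demonstrates newVirtualClient: resolve the cluster's
// kubeconfig secret into a client, then list pods inside the virtual
// cluster. A usage sketch.
func listVirtualPods(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) error {
	virtClient, err := newVirtualClient(ctx, hostClient, clusterName, clusterNamespace)
	if err != nil {
		return err
	}

	var pods corev1.PodList
	if err := virtClient.List(ctx, &pods, ctrlruntimeclient.InNamespace("default")); err != nil {
		return fmt.Errorf("failed to list pods in virtual cluster: %w", err)
	}

	fmt.Printf("virtual cluster %s/%s runs %d pods in default\n", clusterNamespace, clusterName, len(pods.Items))

	return nil
}
```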

View File

@@ -33,7 +33,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -46,7 +46,6 @@ const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
defaultVirtualClusterCIDR = "10.52.0.0/16"
@@ -61,26 +60,36 @@ var (
ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
)
type Config struct {
ClusterCIDR string
SharedAgentImage string
SharedAgentImagePullPolicy string
VirtualAgentImage string
VirtualAgentImagePullPolicy string
K3SServerImage string
K3SServerImagePullPolicy string
ServerImagePullSecrets []string
AgentImagePullSecrets []string
}
type ClusterReconciler struct {
DiscoveryClient *discovery.DiscoveryClient
Client client.Client
Scheme *runtime.Scheme
PortAllocator *agent.PortAllocator
record.EventRecorder
SharedAgentImage string
SharedAgentImagePullPolicy string
K3SImage string
K3SImagePullPolicy string
PortAllocator *agent.PortAllocator
Config
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage string, k3SImagePullPolicy string, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
}
if sharedAgentImage == "" {
if config.SharedAgentImage == "" {
return errors.New("missing shared agent image")
}
@@ -90,19 +99,25 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
// initialize a new Reconciler
reconciler := ClusterReconciler{
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
EventRecorder: eventRecorder,
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
K3SImage: k3SImage,
K3SImagePullPolicy: k3SImagePullPolicy,
PortAllocator: portAllocator,
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
EventRecorder: eventRecorder,
PortAllocator: portAllocator,
Config: Config{
SharedAgentImage: config.SharedAgentImage,
SharedAgentImagePullPolicy: config.SharedAgentImagePullPolicy,
VirtualAgentImage: config.VirtualAgentImage,
VirtualAgentImagePullPolicy: config.VirtualAgentImagePullPolicy,
K3SServerImage: config.K3SServerImage,
K3SServerImagePullPolicy: config.K3SServerImagePullPolicy,
ServerImagePullSecrets: config.ServerImagePullSecrets,
AgentImagePullSecrets: config.AgentImagePullSecrets,
},
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
@@ -133,7 +148,7 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
// Enqueue all the Cluster in the namespace
var clusterList v1alpha1.ClusterList
var clusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
@@ -151,7 +166,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("reconciling cluster")
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -162,8 +177,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -217,14 +232,14 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(cluster, err)
return err
}
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
var ns v1.Namespace
@@ -236,7 +251,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.PolicyName = policyName
if found && policyName != "" {
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
return err
}
@@ -266,12 +281,12 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
return err
}
s := server.New(cluster, c.Client, token, c.K3SImage, c.K3SImagePullPolicy)
s := server.New(cluster, c.Client, token, c.K3SServerImage, c.K3SServerImagePullPolicy, c.ServerImagePullSecrets)
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
}
}
@@ -279,7 +294,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
@@ -291,7 +306,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
@@ -337,7 +352,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
@@ -369,7 +384,7 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
}
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
@@ -407,7 +422,7 @@ func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster
return err
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
if err != nil {
@@ -443,7 +458,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
@@ -535,7 +550,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
@@ -563,7 +578,7 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
return currentService, nil
}
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
@@ -599,7 +614,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
return nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
// create headless service for the statefulset
@@ -619,6 +634,9 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
// Add the finalizer to the StatefulSet so the statefulset controller can handle cleanup.
controllerutil.AddFinalizer(expectedServerStatefulSet, etcdPodFinalizerName)
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
@@ -638,7 +656,7 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error
@@ -668,12 +686,12 @@ func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alp
return err
}
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
config := agent.NewConfig(cluster, c.Client, c.Scheme)
var agentEnsurer agent.ResourceEnsurer
if cluster.Spec.Mode == agent.VirtualNodeMode {
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.VirtualAgentImage, c.VirtualAgentImagePullPolicy, c.AgentImagePullSecrets)
} else {
// Assign a port from the pool if the shared agent has mirroring of host nodes enabled
kubeletPort := 10250
@@ -697,13 +715,13 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
cluster.Status.WebhookPort = webhookPort
}
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort)
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
}
return agentEnsurer.EnsureResources(ctx)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
if cluster.Name == ClusterInvalidName {
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
}
@@ -718,6 +736,11 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.
}
}
// validate sync policy
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
}
return nil
}
@@ -799,7 +822,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
}
// validateCustomCACerts will make sure that all the cert secrets exists
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1beta1.Cluster) error {
credentialSources := cluster.Spec.CustomCAs.Sources
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||


@@ -16,17 +16,17 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
// Set the Terminating phase and condition
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,7 +39,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
}
// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
@@ -61,7 +61,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error


@@ -17,7 +17,7 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -71,7 +71,12 @@ var _ = BeforeSuite(func() {
ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "", 50, portAllocator, &record.FakeRecorder{})
clusterConfig := &cluster.Config{
SharedAgentImage: "rancher/k3k-kubelet:latest",
K3SServerImage: "rancher/k3s",
VirtualAgentImage: "rancher/k3s",
}
err = cluster.Add(ctx, mgr, clusterConfig, 50, portAllocator, &record.FakeRecorder{})
Expect(err).NotTo(HaveOccurred())
go func() {
@@ -94,7 +99,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme


@@ -12,7 +12,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -38,7 +38,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("creating a Cluster", func() {
It("will be created with some defaults", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
@@ -48,15 +48,15 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Mode).To(Equal(v1beta1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
Expect(cluster.Status.Phase).To(Equal(v1beta1.ClusterUnknown))
serverVersion, err := k8s.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -92,14 +92,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with nodePort", func() {
It("will have a NodePort service", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
},
}
@@ -124,14 +124,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will have the specified ports exposed when specified", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ETCDPort: ptr.To[int32](30011),
},
@@ -173,14 +173,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will not expose the port when out of range", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ETCDPort: ptr.To[int32](2222),
},
},
@@ -218,14 +218,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with loadbalancer", func() {
It("will have a LoadBalancer service with the default ports exposed", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
},
},
}
@@ -263,6 +263,26 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(etcdPort.TargetPort.IntValue()).To(BeEquivalentTo(2379))
})
})
When("exposing the cluster with nodePort and loadbalancer", func() {
It("will fail", func() {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
NodePort: &v1beta1.NodePortConfig{},
},
},
}
err := k8sClient.Create(ctx, cluster)
Expect(err).To(HaveOccurred())
})
})
})
})
})


@@ -0,0 +1,38 @@
package cluster
import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func newClusterPredicate() predicate.Predicate {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
owner := metav1.GetControllerOf(object)
return owner != nil &&
owner.Kind == "Cluster" &&
owner.APIVersion == v1beta1.SchemeGroupVersion.String()
})
}
func clusterNamespacedName(object client.Object) types.NamespacedName {
var clusterName string
owner := metav1.GetControllerOf(object)
if owner != nil && owner.Kind == "Cluster" && owner.APIVersion == v1beta1.SchemeGroupVersion.String() {
clusterName = owner.Name
} else {
clusterName = object.GetLabels()[translate.ClusterNameLabel]
}
return types.NamespacedName{
Name: clusterName,
Namespace: object.GetNamespace(),
}
}
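For illustration, a minimal sketch of the label fallback path (the Pod and its values are hypothetical; clusterNamespacedName is the helper above):

package cluster

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/k3k-kubelet/translate"
)

// exampleLookup is a hypothetical snippet: with no Cluster owner reference,
// clusterNamespacedName falls back to the translate.ClusterNameLabel label.
func exampleLookup() {
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "nginx",
            Namespace: "ns-1",
            Labels:    map[string]string{translate.ClusterNameLabel: "mycluster"},
        },
    }

    key := clusterNamespacedName(pod) // {Name: "mycluster", Namespace: "ns-1"}
    _ = key
}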


@@ -2,36 +2,19 @@ package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
@@ -43,235 +26,56 @@ type PodReconciler struct {
Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
// AddPodController adds a new controller for Pods to the manager.
// It will reconcile the Pods of the Host Cluster with the one of the Virtual Cluster.
func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := PodReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
return ctrl.NewControllerManagedBy(mgr).
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
For(&v1.Pod{}).
Named(podController).
WithEventFilter(newClusterPredicate()).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("statefulset", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling pod")
s := strings.Split(req.Name, "-")
if len(s) < 1 {
return reconcile.Result{}, nil
}
if s[0] != "k3k" {
return reconcile.Result{}, nil
}
clusterName := s[1]
var cluster v1alpha1.Cluster
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
var pod v1.Pod
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
if len(podList.Items) == 1 {
return reconcile.Result{}, nil
// get cluster from the object
cluster := clusterNamespacedName(&pod)
virtualClient, err := newVirtualClient(ctx, r.Client, cluster.Name, cluster.Namespace)
if err != nil {
return reconcile.Result{}, err
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
if !pod.DeletionTimestamp.IsZero() {
virtName := pod.GetAnnotations()[translate.ResourceNameAnnotation]
virtNamespace := pod.GetAnnotations()[translate.ResourceNamespaceAnnotation]
virtPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: virtName,
Namespace: virtNamespace,
},
}
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
}
return reconcile.Result{}, nil
}
func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
log := ctrl.LoggerFrom(ctx)
log.Info("handling server pod")
role, found := pod.Labels["role"]
if !found {
return fmt.Errorf("server pod has no role label")
}
if role != "server" {
log.V(1).Info("pod has a different role: " + role)
return nil
}
// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
if !pod.DeletionTimestamp.IsZero() {
// check if cluster is deleted then remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
return p.Client.Update(ctx, pod)
}
return nil
}
func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluster) (*tls.Config, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("generating etcd TLS client certificate", "cluster", cluster)
token, err := p.clusterToken(ctx, cluster)
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("removing peer from cluster", "name", name, "address", address)
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
nn.Name = TokenSecretName(cluster.Name)
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}


@@ -16,7 +16,7 @@ import (
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -39,7 +39,7 @@ type content struct {
// GenerateBootstrapData generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) {
func GenerateBootstrapData(ctx context.Context, cluster *v1beta1.Cluster, ip, token string) ([]byte, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, fmt.Errorf("failed to request bootstrap secret: %w", err)
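A hedged usage sketch, assuming ctx, cluster, serviceIP and token are in scope as in the reconciler above:

data, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
if err != nil {
    return err
}
// data is the serialized bootstrap payload, which is then stored in the
// cluster's bootstrap Secret (step 2 above, handled by ensureBootstrapSecret
// in the reconciler).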
@@ -162,7 +162,7 @@ func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
return bootstrap, nil
}
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) {
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1beta1.Cluster) (*ControlRuntimeBootstrap, error) {
key := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,


@@ -8,7 +8,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -45,15 +45,15 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
func serverConfigData(serviceIP string, cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
}
func initConfigData(cluster *v1alpha1.Cluster, token string) string {
func initConfigData(cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\n" + serverOptions(cluster, token)
}
func serverOptions(cluster *v1alpha1.Cluster, token string) string {
func serverOptions(cluster *v1beta1.Cluster, token string) string {
var opts string
// TODO: generate token if not found


@@ -8,7 +8,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -22,7 +22,7 @@ func IngressName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "ingress")
}
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingress {
func Ingress(ctx context.Context, cluster *v1beta1.Cluster) networkingv1.Ingress {
ingress := networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
@@ -52,7 +52,7 @@ func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingres
return ingress
}
func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
func ingressRules(cluster *v1beta1.Cluster) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil {


@@ -18,7 +18,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -32,22 +32,24 @@ const (
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
k3SImage string
k3SImagePullPolicy string
cluster *v1beta1.Cluster
client client.Client
mode string
token string
image string
imagePullPolicy string
imagePullSecrets []string
}
func New(cluster *v1alpha1.Cluster, client client.Client, token string, k3SImage string, k3SImagePullPolicy string) *Server {
func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
return &Server{
cluster: cluster,
client: client,
token: token,
mode: string(cluster.Spec.Mode),
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
cluster: cluster,
client: client,
token: token,
mode: string(cluster.Spec.Mode),
image: image,
imagePullPolicy: imagePullPolicy,
imagePullSecrets: imagePullSecrets,
}
}
@@ -119,7 +121,7 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
{
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -242,6 +244,11 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, s.cluster.Spec.ServerEnvs...)
// add image pull secrets
for _, imagePullSecret := range s.imagePullSecrets {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
return podSpec
}
@@ -253,12 +260,12 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
persistent bool
)
image := controller.K3SImage(s.cluster, s.k3SImage)
image := controller.K3SImage(s.cluster, s.image)
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
replicas = *s.cluster.Spec.Servers
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
persistent = true
pvClaim = s.setupDynamicPersistence()
}
@@ -372,7 +379,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
},
}
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
}


@@ -6,11 +6,11 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func Service(cluster *v1beta1.Cluster) *v1.Service {
service := &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -69,7 +69,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
}
// addLoadBalancerPorts adds the load balancer ports to the service
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1beta1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, use the default port
if loadbalancerConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
@@ -90,7 +90,7 @@ func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadB
}
// addNodePortPorts adds the node port ports to the service
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addNodePortPorts(service *v1.Service, nodePortConfig v1beta1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, Kubernetes will set the node port to a random port between 30000 and 32767
if nodePortConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)


@@ -0,0 +1,90 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
serviceController = "k3k-service-controller"
)
type ServiceReconciler struct {
HostClient ctrlruntimeclient.Client
}
// AddServiceController adds a new Service controller to the manager
func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
reconciler := ServiceReconciler{
HostClient: mgr.GetClient(),
}
return ctrl.NewControllerManagedBy(mgr).
Named(serviceController).
For(&v1.Service{}).
WithEventFilter(newClusterPredicate()).
Complete(&reconciler)
}
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring service status to virtual cluster")
var hostService v1.Service
if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// Some services are owned by the cluster but don't have the annotations set (e.g. the kubelet svc)
// They don't exist in the virtual cluster, so we can skip them
virtualServiceName, virtualServiceNameFound := hostService.Annotations[translate.ResourceNameAnnotation]
virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]
if !virtualServiceNameFound || !virtualServiceNamespaceFound {
log.V(1).Info(fmt.Sprintf("service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
return reconcile.Result{}, nil
}
// get cluster from the object
cluster := clusterNamespacedName(&hostService)
virtualClient, err := newVirtualClient(ctx, r.HostClient, cluster.Name, cluster.Namespace)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get cluster info: %v", err)
}
if !hostService.DeletionTimestamp.IsZero() {
return reconcile.Result{}, nil
}
virtualServiceKey := types.NamespacedName{
Name: virtualServiceName,
Namespace: virtualServiceNamespace,
}
var virtualService v1.Service
if err := virtualClient.Get(ctx, virtualServiceKey, &virtualService); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get virtual service: %v", err)
}
if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer
if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
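For context, a sketch of a host Service that would pass the annotation check above (names are hypothetical; the metav1 import is assumed):

hostService := &v1.Service{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "mysvc-translated", // host-side name, hypothetical
        Namespace: "ns-1",
        Annotations: map[string]string{
            translate.ResourceNameAnnotation:      "mysvc",
            translate.ResourceNamespaceAnnotation: "default",
        },
    },
}
// Its LoadBalancer status would be copied to the Service "mysvc" in
// namespace "default" of the virtual cluster.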


@@ -0,0 +1,320 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
const (
statefulsetController = "k3k-statefulset-controller"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
)
type StatefulSetReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
}
// AddStatefulSetController adds a new StatefulSet controller to the manager
func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := StatefulSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
return ctrl.NewControllerManagedBy(mgr).
For(&apps.StatefulSet{}).
Owns(&v1.Pod{}).
Named(statefulsetController).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling statefulset")
var sts apps.StatefulSet
if err := p.Client.Get(ctx, req.NamespacedName, &sts); err != nil {
// we can ignore the IsNotFound error
// if the stateful set was deleted we have already cleaned up the pods
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// If the StatefulSet is being deleted, we need to remove the finalizers from its pods
// and remove the finalizer from the StatefulSet itself.
if !sts.DeletionTimestamp.IsZero() {
return p.handleDeletion(ctx, &sts)
}
// get cluster name from the object
clusterKey := clusterNamespacedName(&sts)
var cluster v1beta1.Cluster
if err := p.Client.Get(ctx, clusterKey, &cluster); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
podList, err := p.listPods(ctx, &sts)
if err != nil {
return reconcile.Result{}, err
}
if len(podList.Items) == 1 {
serverPod := podList.Items[0]
if !serverPod.DeletionTimestamp.IsZero() {
if controllerutil.RemoveFinalizer(&serverPod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, &serverPod); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1beta1.Cluster, pod *v1.Pod) error {
log := ctrl.LoggerFrom(ctx)
log.Info("handling server pod")
if pod.DeletionTimestamp.IsZero() {
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
return p.Client.Update(ctx, pod)
}
return nil
}
// if the etcd pod is marked for deletion, we need to remove it from the etcd member list before deletion
// if the cluster is already deleted, just remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1.Cluster) (*tls.Config, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("generating etcd TLS client certificate", "cluster", cluster)
token, err := p.clusterToken(ctx, cluster)
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("removing peer from cluster", "name", name, "address", address)
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (p *StatefulSetReconciler) clusterToken(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
nn.Name = TokenSecretName(cluster.Name)
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}
func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.StatefulSet) (ctrl.Result, error) {
podList, err := p.listPods(ctx, sts)
if err != nil {
return reconcile.Result{}, err
}
for _, pod := range podList.Items {
if controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}
}
}
if controllerutil.RemoveFinalizer(sts, etcdPodFinalizerName) {
return reconcile.Result{}, p.Client.Update(ctx, sts)
}
return reconcile.Result{}, nil
}
func (p *StatefulSetReconciler) listPods(ctx context.Context, sts *apps.StatefulSet) (*v1.PodList, error) {
selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to create selector from statefulset: %w", err)
}
listOpts := &ctrlruntimeclient.ListOptions{
Namespace: sts.Namespace,
LabelSelector: selector,
}
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return nil, ctrlruntimeclient.IgnoreNotFound(err)
}
return &podList, nil
}


@@ -8,7 +8,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
@@ -24,9 +24,9 @@ const (
ReasonTerminating = "Terminating"
)
func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
func (c *ClusterReconciler) updateStatus(cluster *v1beta1.Cluster, reconcileErr error) {
if !cluster.DeletionTimestamp.IsZero() {
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,7 +39,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// Handle validation errors specifically to set the Pending phase.
if errors.Is(reconcileErr, ErrClusterValidation) {
cluster.Status.Phase = v1alpha1.ClusterPending
cluster.Status.Phase = v1beta1.ClusterPending
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -53,7 +53,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -66,7 +66,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// If there's an error, but it's not a validation error, the cluster is in a failed state.
if reconcileErr != nil {
cluster.Status.Phase = v1alpha1.ClusterFailed
cluster.Status.Phase = v1beta1.ClusterFailed
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -80,7 +80,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
// If we reach here, everything is successful.
cluster.Status.Phase = v1alpha1.ClusterReady
cluster.Status.Phase = v1beta1.ClusterReady
newCondition := metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionTrue,


@@ -15,11 +15,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
if cluster.Spec.TokenSecretRef == nil {
return c.ensureTokenSecret(ctx, cluster)
}
@@ -42,7 +42,7 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
return string(tokenSecret.Data["token"]), nil
}
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
log := ctrl.LoggerFrom(ctx)
// check if the secret is already created


@@ -9,7 +9,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -25,19 +25,21 @@ var Backoff = wait.Backoff{
Jitter: 0.1,
}
// K3SImage returns the rancher/k3s image tagged with the specified Version.
// Image returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use the same k8s version as the host cluster,
// stored in the Status object. It will return the untagged version as last fallback.
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
// stored in the Status object. It will return the latest version as a last fallback.
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
image := k3SImage
imageVersion := "latest"
if cluster.Spec.Version != "" {
return k3SImage + ":" + cluster.Spec.Version
imageVersion = cluster.Spec.Version
} else if cluster.Status.HostVersion != "" {
imageVersion = cluster.Status.HostVersion
}
if cluster.Status.HostVersion != "" {
return k3SImage + ":" + cluster.Status.HostVersion
}
return k3SImage
return image + ":" + imageVersion
}
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.


@@ -0,0 +1,77 @@
package controller
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_K3S_Image(t *testing.T) {
type args struct {
cluster *v1beta1.Cluster
k3sImage string
}
tests := []struct {
name string
args args
expectedData string
}{
{
name: "cluster with assigned version spec",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
},
expectedData: "rancher/k3s:v1.2.3",
},
{
name: "cluster with empty version spec and assigned hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1beta1.ClusterStatus{
HostVersion: "v4.5.6",
},
},
},
expectedData: "rancher/k3s:v4.5.6",
},
{
name: "cluster with empty version spec and empty hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
},
},
expectedData: "rancher/k3s:latest",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fullImage := K3SImage(tt.args.cluster, tt.args.k3sImage)
assert.Equal(t, tt.expectedData, fullImage)
})
}
}


@@ -17,7 +17,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -39,7 +39,7 @@ func New() *KubeConfig {
}
}
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster)
if err != nil {
return nil, err
@@ -93,7 +93,7 @@ func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi
return config
}
func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, serverPort int) (string, error) {
func getURLFromService(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, serverPort int) (string, error) {
// get the server service to extract the right IP
key := types.NamespacedName{
Name: server.ServiceName(cluster.Name),


@@ -11,11 +11,11 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling PSA labels")
@@ -33,7 +33,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
// skip the 'warn' only for the privileged PSA level
if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
if psaLevel != v1beta1.PrivilegedPodSecurityAdmissionLevel {
namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
}
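The net effect for, e.g., a "baseline" policy is a label set like this sketch (assuming the enforce label itself is set to the PSA level in the lines elided above):

labels := map[string]string{
    "pod-security.kubernetes.io/enforce":         "baseline", // assumed set alongside enforce-version
    "pod-security.kubernetes.io/enforce-version": "latest",
    "pod-security.kubernetes.io/warn":            "baseline",
    "pod-security.kubernetes.io/warn-version":    "latest",
}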


@@ -11,11 +11,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
@@ -59,7 +59,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Cont
return err
}
func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
func networkPolicy(namespaceName string, policy *v1beta1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
return &networkingv1.NetworkPolicy{
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",


@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
@@ -46,10 +46,10 @@ func Add(mgr manager.Manager, clusterCIDR string, maxConcurrentReconciles int) e
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.VirtualClusterPolicy{}).
For(&v1beta1.VirtualClusterPolicy{}).
Watches(&v1.Namespace{}, namespaceEventHandler()).
Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
Watches(&v1beta1.Cluster{}, clusterEventHandler(&reconciler)).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&v1.ResourceQuota{}).
Owns(&v1.LimitRange{}).
@@ -129,7 +129,7 @@ func namespaceEventHandler() handler.Funcs {
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// enqueue all the available VirtualClusterPolicies
enqueueAllVCPs := func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
vcpList := &v1alpha1.VirtualClusterPolicyList{}
vcpList := &v1beta1.VirtualClusterPolicyList{}
if err := r.Client.List(ctx, vcpList); err != nil {
return
}
@@ -193,7 +193,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
return handler.Funcs{
// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label, enqueue the referenced policy
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
cluster, ok := e.Object.(*v1alpha1.Cluster)
cluster, ok := e.Object.(*v1beta1.Cluster)
if !ok {
return
}
@@ -210,8 +210,8 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
// and some of the spec fields influenced by the policy have changed, enqueue the referenced policy
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
oldCluster, okOld := e.ObjectOld.(*v1beta1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1beta1.Cluster)
if !okOld || !okNew {
return
@@ -250,7 +250,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling VirtualClusterPolicy")
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -281,7 +281,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
return reconcile.Result{}, nil
}
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
return err
}
@@ -293,7 +293,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx conte
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling matching Namespaces")
@@ -340,7 +340,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling ResourceQuota")
@@ -389,7 +389,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling LimitRange")
@@ -437,11 +437,11 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling Clusters")
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
return err
}
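As an aside, a hedged sketch of how a handler like the one above can map a Cluster event to a reconcile request for the policy referenced by the "policy.k3k.io/policy-name" Namespace label; the helper name and wiring are illustrative assumptions, not the actual k3k implementation:
package sketch
import (
	"context"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// enqueuePolicyForCluster looks up the Cluster's Namespace and, when the
// "policy.k3k.io/policy-name" label is set, queues a reconcile request for
// that VirtualClusterPolicy. Illustrative only.
func enqueuePolicyForCluster(ctx context.Context, c client.Client, cluster *v1beta1.Cluster, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
	var ns corev1.Namespace
	if err := c.Get(ctx, types.NamespacedName{Name: cluster.Namespace}, &ns); err != nil {
		return
	}
	if policyName, ok := ns.Labels["policy.k3k.io/policy-name"]; ok {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: policyName}})
	}
}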

View File

@@ -16,7 +16,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
@@ -81,7 +81,7 @@ func buildScheme() *runtime.Scheme {
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme

View File

@@ -8,13 +8,14 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -25,25 +26,25 @@ import (
var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {
Context("creating a VirtualClusterPolicy", func() {
It("should have the 'shared' allowedMode", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.SharedClusterMode))
})
It("should have the 'virtual' mode if specified", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.VirtualClusterMode,
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.VirtualClusterMode,
})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.VirtualClusterMode))
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.VirtualClusterMode))
})
It("should fail for a non-existing mode", func() {
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.ClusterMode("non-existing"),
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.ClusterMode("non-existing"),
},
}
@@ -66,7 +67,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a NetworkPolicy", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policies etc
@@ -121,7 +122,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should recreate the NetworkPolicy if deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policy
@@ -163,12 +164,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
It("should add and update the proper pod-security labels to the namespace", func() {
var (
privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
baseline = v1alpha1.BaselinePodSecurityAdmissionLevel
restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
privileged = v1beta1.PrivilegedPodSecurityAdmissionLevel
baseline = v1beta1.BaselinePodSecurityAdmissionLevel
restricted = v1beta1.RestrictedPodSecurityAdmissionLevel
)
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -195,8 +196,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// Check baseline
// get policy again
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
Expect(err).To(Not(HaveOccurred()))
policy.Spec.PodSecurityAdmissionLevel = &baseline
err := k8sClient.Update(ctx, policy)
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
// wait a bit for the namespace to be updated
@@ -259,9 +264,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should restore the labels if Namespace is updated", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
privileged := v1beta1.PrivilegedPodSecurityAdmissionLevel
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -303,19 +308,19 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's PriorityClass", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultPriorityClass: "foobar",
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -337,7 +342,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's NodeSelector", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
@@ -345,13 +350,13 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -373,18 +378,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update the nodeSelector if changed", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
NodeSelector: map[string]string{"label-1": "value-1"},
@@ -421,7 +426,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// wait a bit and check it's restored
Eventually(func() bool {
var updatedCluster v1alpha1.Cluster
var updatedCluster v1beta1.Cluster
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, &updatedCluster)
@@ -434,7 +439,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a ResourceQuota if Quota is enabled", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -462,7 +467,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if Quota is deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -486,8 +491,11 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
WithPolling(time.Second).
Should(BeNil())
// get policy again
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
Expect(err).To(Not(HaveOccurred()))
policy.Spec.Quota = nil
err := k8sClient.Update(ctx, policy)
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
// wait for a bit for the resourceQuota to be deleted
@@ -505,7 +513,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if unbound", func() {
clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
clusterPolicy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -550,10 +558,10 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
})
func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
func newPolicy(spec v1beta1.VirtualClusterPolicySpec) *v1beta1.VirtualClusterPolicy {
GinkgoHelper()
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
@@ -566,7 +574,7 @@ func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterP
return policy
}
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1beta1.VirtualClusterPolicy) {
GinkgoHelper()
if len(namespace.Labels) == 0 {

View File

@@ -3,50 +3,36 @@ package log
import (
"os"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
type Logger struct {
*zap.SugaredLogger
}
func New(debug bool) *Logger {
return &Logger{newZappLogger(debug).Sugar()}
}
func (l *Logger) WithError(err error) log.Logger {
return l
}
func (l *Logger) WithField(string, any) log.Logger {
return l
}
func (l *Logger) WithFields(field log.Fields) log.Logger {
return l
}
func (l *Logger) Named(name string) *Logger {
l.SugaredLogger = l.SugaredLogger.Named(name)
return l
}
func newZappLogger(debug bool) *zap.Logger {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
func New(debug bool, format string) *zap.Logger {
lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
if debug {
lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
}
encoder := zapcore.NewJSONEncoder(encCfg)
core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl)
encoder := newEncoder(format)
core := zapcore.NewCore(encoder, zapcore.AddSync(os.Stderr), lvl)
return zap.New(core)
}
func newEncoder(format string) zapcore.Encoder {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
var encoder zapcore.Encoder
if format == "console" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)
} else {
encoder = zapcore.NewJSONEncoder(encCfg)
}
return &ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}
}
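A minimal usage sketch for the new constructor above; the import path, alias, and flag values are assumptions for illustration, not part of this change:
package main
import (
	"github.com/go-logr/zapr"
	ctrl "sigs.k8s.io/controller-runtime"
	k3klog "github.com/rancher/k3k/pkg/log" // assumed import path for the package above
)
func main() {
	// "console" selects the colorized console encoder; any other value
	// (for example "json") falls back to the JSON encoder.
	logger := k3klog.New(true, "console")
	defer func() { _ = logger.Sync() }()
	// The returned *zap.Logger can be adapted to logr and handed to controller-runtime.
	ctrl.SetLogger(zapr.NewLogger(logger))
}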

tests/cluster_certs_test.go
View File

@@ -0,0 +1,113 @@
package k3k_test
import (
"context"
"os"
"strings"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
// create custom cert secret
customCertDir := "testdata/customcerts/"
certList := []string{
"server-ca",
"client-ca",
"request-header-ca",
"service",
"etcd-peer-ca",
"etcd-server-ca",
}
for _, certName := range certList {
var cert, key []byte
var err error
filePathPrefix := ""
certfile := certName
if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}
if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))
certSecret := caCertSecret(certName, namespace.Name, cert, key)
err = k8sClient.Create(ctx, certSecret)
Expect(err).To(Not(HaveOccurred()))
}
cluster := NewCluster(namespace.Name)
cluster.Spec.CustomCAs = v1beta1.CustomCAs{
Enabled: true,
Sources: v1beta1.CredentialSources{
ServerCA: v1beta1.CredentialSource{
SecretName: "server-ca",
},
ClientCA: v1beta1.CredentialSource{
SecretName: "client-ca",
},
ETCDServerCA: v1beta1.CredentialSource{
SecretName: "etcd-server-ca",
},
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: "etcd-peer-ca",
},
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: "request-header-ca",
},
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: "service",
},
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("will load the custom certs in the server pod", func() {
ctx := context.Background()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
// check server-ca.crt
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
Expect(err).To(Not(HaveOccurred()))
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
Expect(err).To(Not(HaveOccurred()))
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
})
})
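The caCertSecret helper used in the BeforeEach above is defined elsewhere in the suite; a hedged sketch of what such a helper might look like follows. The Secret data keys ("tls.crt"/"tls.key") and the helper name are assumptions, not taken from this change:
package k3k_test
import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// caCertSecretSketch wraps a CA certificate and key in an Opaque Secret so a
// Cluster's CustomCAs sources can reference it by name. Key names are assumed.
func caCertSecretSketch(name, namespace string, cert, key []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"tls.crt": cert, // may be empty for key-only entries such as "service"
			"tls.key": key,
		},
	}
}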

View File

@@ -0,0 +1,215 @@
package k3k_test
import (
"context"
"crypto/x509"
"errors"
"fmt"
"time"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("deletes the pod in the virtual cluster when deleted from the host", func() {
ctx := context.Background()
pod, _ := virtualCluster.NewNginxPod("")
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(pod)
err := k8s.CoreV1().Pods(namespacedName.Namespace).Delete(ctx, namespacedName.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
Eventually(func() bool {
_, err := virtualCluster.Client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
return apierrors.IsNotFound(err)
}).
WithPolling(time.Second * 5).
WithTimeout(time.Minute).
Should(BeTrue())
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())
By("Recover new config should succeed")
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualClusterWithType(v1beta1.DynamicPersistenceMode)
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("can delete the cluster", func() {
ctx := context.Background()
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second).
Should(BeEmpty())
})
It("can delete a HA cluster", func() {
ctx := context.Background()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1beta1.DynamicPersistenceMode
cluster.Spec.Servers = ptr.To[int32](2)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
virtualCluster := &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second).
Should(BeEmpty())
})
It("uses the same bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
restartServerPod(ctx, virtualCluster)
By("Server pod up and running again")
By("Using old k8s client configuration should succeed")
Eventually(func() error {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
})
})

View File

@@ -10,7 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -21,7 +21,7 @@ import (
var _ = When("a cluster's status is tracked", Label("e2e"), func() {
var (
namespace *corev1.Namespace
vcp *v1alpha1.VirtualClusterPolicy
vcp *v1beta1.VirtualClusterPolicy
)
// This BeforeEach/AfterEach will create a new namespace and a default policy for each test.
@@ -29,7 +29,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
ctx := context.Background()
namespace = NewNamespace()
vcp = &v1alpha1.VirtualClusterPolicy{
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
@@ -53,7 +53,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should start with Provisioning status and transition to Ready", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "status-cluster-",
Namespace: namespace.Name,
@@ -68,7 +68,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterProvisioning))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -84,7 +84,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterReady))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -101,13 +101,13 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should be in Pending status with ValidationFailed reason", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.VirtualClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.VirtualClusterMode,
},
}
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
@@ -119,7 +119,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterPending))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterPending))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())

View File

@@ -1,277 +0,0 @@
package k3k_test
import (
"context"
"crypto/x509"
"errors"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
Eventually(func() bool {
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).To(Not(BeEmpty()))
var isRunning bool
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
isRunning = true
break
}
}
return isRunning
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())
By("Recover new config should succeed")
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
var _ = When("a dynamic cluster is installed", func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("use the same bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(60 * time.Second).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
By("Using old k8s client configuration should succeed")
Eventually(func() error {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
ctx := context.Background()
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
// create custom cert secret
customCertDir := "testdata/customcerts/"
certList := []string{
"server-ca",
"client-ca",
"request-header-ca",
"service",
"etcd-peer-ca",
"etcd-server-ca",
}
for _, certName := range certList {
var cert, key []byte
var err error
filePathPrefix := ""
certfile := certName
if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}
if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))
certSecret := caCertSecret(certName, namespace.Name, cert, key)
err = k8sClient.Create(ctx, certSecret)
Expect(err).To(Not(HaveOccurred()))
}
cluster := NewCluster(namespace.Name)
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ServerCA: v1alpha1.CredentialSource{
SecretName: "server-ca",
},
ClientCA: v1alpha1.CredentialSource{
SecretName: "client-ca",
},
ETCDServerCA: v1alpha1.CredentialSource{
SecretName: "etcd-server-ca",
},
ETCDPeerCA: v1alpha1.CredentialSource{
SecretName: "etcd-peer-ca",
},
RequestHeaderCA: v1alpha1.CredentialSource{
SecretName: "request-header-ca",
},
ServiceAccountToken: v1alpha1.CredentialSource{
SecretName: "service",
},
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("will load the custom certs in the server pod", func() {
_, _ = virtualCluster.NewNginxPod("")
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
// check server-ca.crt
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
Expect(err).To(Not(HaveOccurred()))
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
Expect(err).To(Not(HaveOccurred()))
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
})
})

View File

@@ -0,0 +1,582 @@
package k3k_test
import (
"context"
"strings"
"time"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster update its envs", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a shared mode cluster update its server args", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its server args", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a shared mode cluster update its version", Label("e2e"), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))
_, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
// agent pods
agentPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(agentPods)).To(Equal(1))
agentPod := agentPods[0]
condIndex, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"net/url"
"strings"
"sync"
"time"
@@ -20,7 +21,7 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -29,18 +30,27 @@ import (
)
type VirtualCluster struct {
Cluster *v1alpha1.Cluster
Cluster *v1beta1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
}
func NewVirtualCluster() *VirtualCluster {
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
GinkgoHelper()
return NewVirtualClusterWithType(v1beta1.EphemeralPersistenceMode)
}
func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *VirtualCluster {
GinkgoHelper()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = persistenceType
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
@@ -80,7 +90,7 @@ func NewVirtualClusters(n int) []*VirtualCluster {
func NewNamespace() *corev1.Namespace {
GinkgoHelper()
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
@@ -116,70 +126,85 @@ func deleteNamespace(name string) {
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
func NewCluster(namespace string) *v1beta1.Cluster {
return &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.EphemeralPersistenceMode,
},
ServerArgs: []string{
"--disable-network-policy",
},
},
}
}
func CreateCluster(cluster *v1alpha1.Cluster) {
func CreateCluster(cluster *v1beta1.Cluster) {
GinkgoHelper()
ctx := context.Background()
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
By("Waiting for cluster to be ready")
// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
serverRunning := false
kubeletRunning := false
// all of the servers and agents need to be in the Running phase
var serversReady, agentsReady int
for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
imageName = strings.Split(imageName, ":")[0] // remove tag
switch imageName {
case "rancher/k3s":
serverRunning = pod.Status.Phase == corev1.PodRunning
case "rancher/k3k-kubelet":
kubeletRunning = pod.Status.Phase == corev1.PodRunning
if pod.Labels["role"] == "server" {
GinkgoLogr.Info(fmt.Sprintf("server pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
serversReady++
}
}
if serverRunning && kubeletRunning {
return true
if pod.Labels["type"] == "agent" {
GinkgoLogr.Info(fmt.Sprintf("agent pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
agentsReady++
}
}
}
return false
expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)
By(fmt.Sprintf("serversReady=%d/%d agentsReady=%d/%d", serversReady, expectedServers, agentsReady, expectedAgents))
// the ready server pods must match the expected server count exactly; in shared mode the kubelet also runs as an agent, so having more ready agents than expected is fine
if (serversReady != expectedServers) || (agentsReady < expectedAgents) {
return false
}
return true
}).
WithTimeout(time.Minute * 2).
WithTimeout(time.Minute * 5).
WithPolling(time.Second * 5).
Should(BeTrue())
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1beta1.Cluster) *kubernetes.Clientset {
virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
return virtualK8sClient
}
// NewVirtualK8sClientAndConfig returns a Kubernetes ClientSet and rest.Config for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clientset, *rest.Config) {
GinkgoHelper()
var (
@@ -239,6 +264,10 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
var podIP string
// only check the pod on the host cluster if the mode is shared mode
if c.Cluster.Spec.Mode != v1beta1.SharedClusterMode {
return nginxPod, ""
}
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
@@ -302,3 +331,84 @@ func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, strin
return stdout.String(), stderr.String(), err
}
func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
GinkgoHelper()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
}
func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
return serverPods.Items
}
func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
labelSelector := fmt.Sprintf("cluster=%s,type=agent,mode=%s", virtualCluster.Cluster.Name, virtualCluster.Cluster.Spec.Mode)
agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
return agentPods.Items
}
// getEnv returns the value of the named environment variable from the pod's first container, and false if it is not set
func getEnv(pod *corev1.Pod, envName string) (string, bool) {
container := pod.Spec.Containers[0]
for _, envVar := range container.Env {
if envVar.Name == envName {
return envVar.Value, true
}
}
return "", false
}
// isArgFound returns true if the given argument is found in the first container's command
func isArgFound(pod *corev1.Pod, arg string) bool {
container := pod.Spec.Containers[0]
for _, cmd := range container.Command {
if strings.Contains(cmd, arg) {
return true
}
}
return false
}
func getServerIP(ctx context.Context, cfg *rest.Config) (string, error) {
if k3sContainer != nil {
return k3sContainer.ContainerIP(ctx)
}
u, err := url.Parse(cfg.Host)
if err != nil {
return "", err
}
// If Host includes a port, u.Hostname() extracts just the hostname part
return u.Hostname(), nil
}

View File

@@ -0,0 +1,35 @@
package k3k_test
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
Eventually(func() bool {
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
podList, err := k8s.CoreV1().Pods(k3kNamespace).List(context.Background(), opts)
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).To(Not(BeEmpty()))
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
return true
}
}
return false
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})

View File

@@ -7,7 +7,9 @@ import (
"io"
"maps"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
@@ -17,7 +19,9 @@ import (
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@@ -28,66 +32,74 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
const (
k3kNamespace = "k3k-system"
k3kName = "k3k"
)
func TestTests(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Tests Suite")
}
var (
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
repo string
helmActionConfig *action.Configuration
)
var _ = BeforeSuite(func() {
var err error
ctx := context.Background()
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.32.1-k3s1")
Expect(err).To(Not(HaveOccurred()))
repo = os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
hostIP, err = k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
GinkgoWriter.Println("K3s containerIP: " + hostIP)
if dockerInstallEnabled {
installK3SDocker(ctx)
initKubernetesClient(ctx)
installK3kChart()
} else {
initKubernetesClient(ctx)
}
kubeconfig, err := k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
DeferCleanup(os.Remove, kubeconfigPath)
initKubernetesClient(kubeconfig)
installK3kChart(kubeconfig)
patchPVC(ctx, k8s)
})
func initKubernetesClient(kubeconfig []byte) {
var err error
func initKubernetesClient(ctx context.Context) {
var (
err error
kubeconfig []byte
)
kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))
kubeconfig, err = os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
hostIP, err = getServerIP(ctx, restcfg)
Expect(err).To(Not(HaveOccurred()))
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
@@ -97,110 +109,288 @@ func initKubernetesClient(kubeconfig []byte) {
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
}
func installK3kChart(kubeconfig []byte) {
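// buildScheme registers the core v1 and k3k.io/v1beta1 API types on a fresh
// runtime.Scheme for the controller-runtime client.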
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
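// installK3SDocker starts a K3s test container (version from K3S_HOST_VERSION,
// defaulting to v1.32.1+k3s1), loads the locally built k3k and k3k-kubelet
// images into it, and points KUBECONFIG at a temporary kubeconfig for the rest
// of the suite.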
func installK3SDocker(ctx context.Context) {
var (
err error
kubeconfig []byte
)
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
if k3sHostVersion == "" {
k3sHostVersion = "v1.32.1+k3s1"
}
k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
Expect(err).To(Not(HaveOccurred()))
containerIP, err := k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Println("K3s containerIP: " + containerIP)
kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(os.Remove, kubeconfigPath)
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
GinkgoWriter.Print(kubeconfigPath)
GinkgoWriter.Print(string(kubeconfig))
}
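// installK3kChart installs the local ../charts/k3k chart through the Helm SDK
// into the k3k-system namespace, overriding the controller and shared-agent
// images with the locally built :dev tags.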
func installK3kChart() {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))
actionConfig := new(action.Configuration)
helmActionConfig = new(action.Configuration)
kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
releaseName := "k3k"
releaseNamespace := "k3k-system"
err = actionConfig.Init(restClientGetter, releaseNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(actionConfig)
iCli.ReleaseName = releaseName
iCli.Namespace = releaseNamespace
iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = k3kName
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
iCli.Wait = true
imageMap, _ := k3kChart.Values["image"].(map[string]any)
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": "rancher/k3k",
"repository": repo + "/k3k",
"tag": "dev",
"pullPolicy": "IfNotPresent",
})
sharedAgentMap, _ := k3kChart.Values["sharedAgent"].(map[string]any)
agentMap, _ := k3kChart.Values["agent"].(map[string]any)
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": "rancher/k3k-kubelet",
"repository": repo + "/k3k-kubelet",
"tag": "dev",
})
err = k3sContainer.LoadImages(context.Background(), "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
}
var _ = AfterSuite(func() {
// dump k3s logs
readCloser, err := k3sContainer.Logs(context.Background())
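// patchPVC creates the coverage-data-pvc PersistentVolumeClaim and patches the
// k3k controller Deployment to mount it at /tmp/covdata with GOCOVERDIR set,
// then waits for the rollout to become available.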
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "coverage-data-pvc",
Namespace: k3kNamespace,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("100M"),
},
},
},
}
_, err := clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
logs, err := io.ReadAll(readCloser)
patchData := []byte(`
{
"spec": {
"template": {
"spec": {
"volumes": [
{
"name": "tmp-covdata",
"persistentVolumeClaim": {
"claimName": "coverage-data-pvc"
}
}
],
"containers": [
{
"name": "k3k",
"volumeMounts": [
{
"name": "tmp-covdata",
"mountPath": "/tmp/covdata"
}
],
"env": [
{
"name": "GOCOVERDIR",
"value": "/tmp/covdata"
}
]
}
]
}
}
}
}`)
GinkgoWriter.Printf("Applying patch to deployment '%s' in namespace '%s'...\n", k3kName, k3kNamespace)
_, err = clientset.AppsV1().Deployments(k3kNamespace).Patch(
ctx,
k3kName,
types.StrategicMergePatchType,
patchData,
metav1.PatchOptions{},
)
Expect(err).To(Not(HaveOccurred()))
logfile := path.Join(os.TempDir(), "k3s.log")
err = os.WriteFile(logfile, logs, 0o644)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() bool {
GinkgoWriter.Println("Checking K3k deployment status")
GinkgoWriter.Println("k3s logs written to: " + logfile)
dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kName, metav1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
// dump k3k controller logs
readCloser, err = k3sContainer.Logs(context.Background())
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", readCloser)
// 1. Check if the controller has observed the latest generation
if dep.Generation > dep.Status.ObservedGeneration {
GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
return false
}
// dump k3k logs
writeK3kLogs()
// 2. Check if all replicas have been updated
if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
return false
}
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
})
// 3. Check if all updated replicas are available
if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
GinkgoWriter.Printf("K3k deployment availabl replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
return false
}
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
return true
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(BeTrue())
}
func writeK3kLogs() {
var (
err error
podList corev1.PodList
)
var _ = AfterSuite(func() {
ctx := context.Background()
err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"})
goCoverDir := os.Getenv("GOCOVERDIR")
if goCoverDir == "" {
goCoverDir = path.Join(os.TempDir(), "covdata")
Expect(os.MkdirAll(goCoverDir, 0o755)).To(Succeed())
}
dumpK3kCoverageData(ctx, goCoverDir)
if k3sContainer != nil {
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})
// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
// It will then download the files with kubectl cp into the specified folder. If the folder doesn't exist, it will be created.
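// Note: this relies on the controller image being built with -cover and on the
// process shutting down cleanly on the signal, so the Go runtime flushes its
// counters to GOCOVERDIR; the downloaded data can later be inspected with
// `go tool covdata percent -i=<dir>`.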
func dumpK3kCoverageData(ctx context.Context, folder string) {
GinkgoWriter.Println("Restarting k3k controller...")
var podList corev1.PodList
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
k3kPod := podList.Items[0]
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{})
cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", "k3k", "--", "kill", "1")
output, err := cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
Eventually(func() corev1.PodPhase {
var pod corev1.Pod
key := types.NamespacedName{
Namespace: k3kNamespace,
Name: k3kPod.Name,
}
err = k8sClient.Get(ctx, key, &pod)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Printf("K3k controller status: %s\n", pod.Status.Phase)
return pod.Status.Phase
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 2).
WithTimeout(time.Minute).
Should(Equal(corev1.PodRunning))
GinkgoWriter.Printf("Downloading covdata from k3k controller to %s\n", folder)
cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, k3kPod.Name), folder)
output, err = cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
}
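// getK3kLogs streams the previous container logs of the first k3k controller
// pod; Previous is set because the controller was restarted earlier in
// AfterSuite to flush its coverage data.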
func getK3kLogs(ctx context.Context) io.ReadCloser {
var podList corev1.PodList
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
k3kPod := podList.Items[0]
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{Previous: true})
podLogs, err := req.Stream(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3k.log", podLogs)
return podLogs
}
func writeLogs(filename string, logs io.ReadCloser) {
@@ -223,7 +413,7 @@ func readFileWithinPod(ctx context.Context, client *kubernetes.Clientset, config
stderr, err := podExec(ctx, client, config, namespace, name, command, nil, output)
if err != nil || len(stderr) > 0 {
return nil, fmt.Errorf("faile to read the following file %s: %v", path, err)
return nil, fmt.Errorf("failed to read the following file %s: %v", path, err)
}
return output.Bytes(), nil