Compare commits

...

58 Commits

Author SHA1 Message Date
Enrico Candino
c34cf9ce94 added virtual mode conformance tests (#534) 2025-10-28 13:47:31 +01:00
Enrico Candino
bf70e0d171 Updated Cluster and VirtualClusterPolicy spec for sync and loadbalancer (#528)
* add default false for ingress and priorityClass, cleanup tests and added new tests

* fix typo for loadBalancer

* fix test aligning VirtualClusterPolicy SyncConfig

* set required enabled field, revert pointer on optional SyncConfig

* update samples
2025-10-24 17:02:26 +02:00
Enrico Candino
cebf6594c4 switch to text log as default (#529) 2025-10-24 13:42:41 +02:00
Enrico Candino
075d72df5d Cleanup of customCAs spec (#527)
* cleanup spec from customCAs when omitted

* add enabled default for customCAs
2025-10-23 22:11:44 +02:00
Enrico Candino
ee7eac89ce Enhance logging and update Helm installation parameters for better debugging and cluster management (#519) 2025-10-22 14:55:47 +02:00
Enrico Candino
514fdf6b86 Fix for flaky test (#523)
* fix for flaky test

* fix lint

* check ContainersReady condition
2025-10-21 18:19:36 +02:00
Hussein Galal
730e4e1c79 Fix pseudo PV deletion (#511)
* Fix pseudo PV deletion

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix pseudo PV deletion

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-18 00:56:50 +02:00
Hussein Galal
a3076af38f Increase timeout and add timeout option (#514)
* Increase timeout and add timeout option

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Increase timeout and add timeout option

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-17 16:51:40 +03:00
Hussein Galal
89dc352bea Scale up/down tests for virtual and shared mode (#508)
* Scale up/down tests for virtual and shared mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* defer cleanup and more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add labels to e2e tests and divide the workload

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add labels to e2e tests and divide the workload

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add validate job to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix label filters for e2e tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix makefile

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* use constants for e2e tests labels

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix labels

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-15 17:01:14 +03:00
Enrico Candino
7644406eeb Fix for flaky test (#509)
* fix for flaky test and fix for PVC creation

* fix lint
2025-10-15 11:31:45 +02:00
Enrico Candino
2206632dcc bump charts (#507) 2025-10-14 15:19:00 +02:00
Enrico Candino
8ffdc9bafd renaming webhook (#506) 2025-10-13 17:25:17 +02:00
Enrico Candino
594c2571c3 promoted v1alpha1 resources to v1beta1 (#505) 2025-10-13 17:24:56 +02:00
Hussein Galal
12971f55a6 Add k8s version upgrade test (#503)
* Add k8s version upgrade test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-13 17:14:25 +03:00
Enrico Candino
99f750525f Fix extraEnv and other Helm values (#500)
* fix for extraEnv

* moved env var to flags

* changed resources as object

* renamed replicaCount to replicas

* cleanup spaces

* moved some values and spacing

* renamed some flags
2025-10-13 12:50:07 +02:00
Hussein Galal
a0fd472841 Use K3S host cluster for E2E tests (#492)
* Add kubeconfig to e2e_tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add E2E_KUBECONFIG env variable

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix yaml permissions for kubeconfig

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix image name and use ttl.sh

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add uuidgen result to a file

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add hostIP

* Add k3s version to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove virtual mode tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix failed test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add KUBECONFIG env variable to the make install

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add k3kcli to github_path

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Use docker installation for testing the cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-08 15:39:35 +03:00
Enrico Candino
7387fc1b23 Fix Service reconciliation error loop (#497)
* fix service reconciliation error by adding checks for virtual service annotations

* renamed var
2025-10-08 14:03:50 +02:00
Enrico Candino
9f265c73d9 Fix for HA server deletion (#493)
* wip

* wip

* wip

* removed todo
2025-10-08 13:23:15 +02:00
Enrico Candino
00ef6d582c Add log-format, and cleanup (#494)
* using logr.Logger

* testing levels

* adding log format

* fix lint

* removed tests

* final cleanup
2025-10-08 13:19:57 +02:00
Enrico Candino
5c95ca3dfa Fix for pod eviction in host cluster (#484)
* update statefulset controller

* fix for single pod

* adding pod controller

* added test

* removed comment

* merged service controller

* revert statefulset

* added test

* added common owner filter
2025-10-03 16:22:54 +02:00
jpgouin
6523b8339b change the default storage request size request to 2Gi (#490)
* change the default storage request size request to 2Gi
2025-10-03 09:04:13 +02:00
Hussein Galal
80037e815f Adding upgrade path tests (#481)
* Adding upgrade path tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Remove update label

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-02 14:53:08 +03:00
Enrico Candino
7585611792 Rename PodController to StatefulSetController (#482)
* renamed pod.go

* update statefulset controller

* fix for single pod

* added test, revert finalizer

* wip ha deletion

* revert logic

* remove focus
2025-10-01 17:06:24 +02:00
Hussein Galal
0bd681ab60 Lb service status sync (#451)
* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-01 13:25:31 +03:00
Hussein Galal
4fe36b3d0c Bump Chart to v0.3.5 (#485)
* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-30 15:26:59 +03:00
Enrico Candino
01589bb359 splitting tests (#461) 2025-09-23 12:07:49 +02:00
Hussein Galal
30217df268 Bump chart to v0.3.5-rc1 (#467)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-17 12:14:44 +03:00
Enrico Candino
04198652d5 check for single expose mode (#466) 2025-09-17 10:39:55 +02:00
Hussein Galal
72eb819216 Add imagepullsecrets to controller, server, and agents (#455)
* Add imagepullsecrets to controller, server, and agents

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fxing tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add agent section to helm chart values

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix charts values

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixing chart and refactoring cluster config

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* default lists to the values of imagepullsecrets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix version image function and add unit tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify arguments and remove registry from the code

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-17 11:29:01 +03:00
Alex Bissessur
4d4003f6f9 fix broken k3kcli docs path (#463)
Signed-off-by: alex <alexbissessur@gmail.com>
2025-09-16 16:41:19 +02:00
Hussein Galal
aca01127f8 Fix PVC sync and sync defaults (#458)
* Fix PVC sync and sync defaults

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix PVC sync and sync defaults

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes to pvc sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* increase the timeout on the e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* configure the syncConfig correctly in vcp

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix policy unit test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* revert timeout of the test to 20 second

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-16 13:01:12 +03:00
Enrico Candino
1550c6b45a Add k3k controller coverage data (#452)
* added k3k controller coverage data

* cleanup
2025-09-03 11:37:56 +02:00
Hussein Galal
caf785f23b Add resources sync configuration (#431)
* Add resources sync configuration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor cluster sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify the syncerContext

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify the syncerContext

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* drop the ClusterClient struct

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix updates to syncer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor secrets/configmaps sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* refactor secrets/configmaps sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add imagepullsecret translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add exception for deleted resources

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* linting fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove the option to disable imagepullsecret translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-01 14:34:29 +03:00
Hussein Galal
b3f7a8ab7f Fix subpath field (#441)
* Fix pod fieldpath annotation translation (#434)

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix overrideEnvVars

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove extra comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix unit tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix merge env vars

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-01 11:10:31 +03:00
Enrico Candino
bd2494a0a9 Bump Charts to 0.3.4 (#446) 2025-08-28 10:57:23 +02:00
Enrico Candino
237a3cb280 Bump Charts to 0.3.4-rc3 (#445) 2025-08-25 19:02:30 +02:00
Enrico Candino
d23cf86fce Fix missing custom-certs flag in cli (#444)
* fix missing custom-certs path in cli

* fix docs
2025-08-25 18:37:29 +02:00
Enrico Candino
65cb8ad123 bump chart (#440) 2025-08-19 10:57:07 +02:00
Hussein Galal
6db88b5a00 Revert "Fix pod fieldpath annotation translation (#434)" (#435)
This reverts commit 883d401ae3.
2025-08-18 14:28:19 +03:00
Hussein Galal
8d89c7d133 Fix service port for generated kubeconfig secret (#433)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-08-18 09:17:30 +03:00
Hussein Galal
883d401ae3 Fix pod fieldpath annotation translation (#434)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-08-18 09:16:58 +03:00
Enrico Candino
f85702dc23 Bump version and appVersion to 0.3.4-rc1 in Chart.yaml (#429) 2025-07-24 17:13:12 +02:00
Enrico Candino
084701fcd9 Migrate from urfave/cli to cobra (#426)
* wip

* env var fix

* cluster create

* cluster create and delete

* cluster list

* cluster cmd

* kubeconfig

* policy create

* policy delete and list, and added root commands

* removed urfavecli from k3kcli

* fix policy command

* k3k-kubelet to cobra

* updated docs

* updated go.mod

* updated test

* added deletion

* added cleanup and flake attempts

* wip bind env

* simplified config
2025-07-24 16:49:40 +02:00
Enrico Candino
5eb1d2a5bb Adding some tests for k3kcli (#417)
* adding some cli tests

* added coverage and tests

* fix lint and cli tests

* fix defer

* some more cli tests
2025-07-23 11:03:41 +02:00
Enrico Candino
98d17cdb50 Added new golangci-lint formatters (#425)
* add gci formatter

* gofmt and gofumpt

* rewrite rule

* added make fmt
2025-07-22 10:42:41 +02:00
Enrico Candino
2047a600ed Migrate golangci-lint to v2 (#424)
* golangci-lint upgrade

* fix lint
2025-07-22 10:10:26 +02:00
Hussein Galal
a98c49b59a Adding custom certificate to the virtual clusters (#409)
* Adding custom certificate to the virtual clusters

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* integrate cert-manager

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add individual cert tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-07-21 19:23:11 +03:00
Enrico Candino
1048e3f82d fix for portallocator initialization (#423) 2025-07-21 17:03:39 +02:00
Alex Bissessur
c480bc339e update ver for k3kcli install (#421)
Signed-off-by: xelab04 <alexbissessur@gmail.com>
2025-07-21 11:18:34 +02:00
Enrico Candino
a0af20f20f codecov (#418) 2025-07-18 11:50:57 +02:00
Enrico Candino
748a439d7a fix for restoring policy (#413) 2025-07-17 10:25:09 +02:00
Enrico Candino
0a55bec305 improve chart-release workflow (#412) 2025-07-14 15:56:30 +02:00
Enrico Candino
2ab71df139 Add Conditions and current status to Cluster (#408)
* Added Cluster Conditions

* added e2e tests

* fix lint

* cli polling

* update tests
2025-07-14 15:53:37 +02:00
Enrico Candino
753b31b52a Adding configurable maxConcurrentReconcilers and small CRD cleanup (#410)
* removed Persistence from Status, fixed default for StorageSize and StorageDefault

* added configurable maxConcurrentReconciles

* fix concurrent issues

* add validate as prereq for tests
2025-07-10 14:46:33 +02:00
Hussein Galal
fcc875ab85 Mirror host nodes (#389)
* mirror host nodes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add mirror host nodes feature

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add controllername to secrets/configmap syncer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* golint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* build docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* setting controller namespace env

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add a controller_namespace env to the test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add mirrorHostNodes spec to conformance tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* change the ptr int to int

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix map key name

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-07-08 14:48:24 +03:00
Enrico Candino
57263bd10e fail fast matrix (#398) 2025-07-01 11:04:56 +02:00
Enrico Candino
bf82318ad9 Add PriorityClass reconciler (virtual cluster -> host) (#377)
* added priorityclass controller

* added priorityClass controller tests

* fix for update priorityClass

* fix system skip priorityclass

* fix name
2025-07-01 11:03:14 +02:00
jpgouin
1ca86d09d1 add troubleshoot how to guide (#390)
* add troubleshoot how to guide

Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-06-30 16:54:13 +02:00
156 changed files with 10899 additions and 3411 deletions


@@ -1 +1,3 @@
release-name-template: chart-{{ .Version }}
make-release-latest: false
skip-existing: true


@@ -2,9 +2,6 @@ name: Chart
on:
workflow_dispatch:
push:
tags:
- "chart-*"
permissions:
contents: write
@@ -18,15 +15,6 @@ jobs:
with:
fetch-depth: 0
- name: Check tag
if: github.event_name == 'push'
run: |
pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//")
chart_tag=$(yq .version charts/k3k/Chart.yaml)
echo pushed_tag=${pushed_tag} chart_tag=${chart_tag}
[ "${pushed_tag}" == "${chart_tag}" ]
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"


@@ -0,0 +1,125 @@
name: Conformance Tests - Virtual Mode
on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
conformance:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3s
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
K3S_HOST_VERSION: v1.32.1+k3s1
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
kubectl cluster-info
kubectl get nodes
- name: Build, package and setup K3k
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
export REPO=ttl.sh/$(uuidgen)
export VERSION=1h
make build
make package
make push
make install
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
k3kcli cluster create --mode=virtual --servers=2 mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Export logs
if: always()
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive K3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3s-${{ matrix.type }}-logs
path: /tmp/k3s.log
- name: Archive K3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3k-${{ matrix.type }}-logs
path: /tmp/k3k.log
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log


@@ -106,13 +106,14 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
@@ -156,7 +157,6 @@ jobs:
sigs:
runs-on: ubuntu-latest
if: inputs.test == '' || inputs.test != 'conformance'
strategy:
fail-fast: false
@@ -184,6 +184,12 @@ jobs:
focus: '\[sig-storage\].*\[Conformance\]'
steps:
- name: Validate input and fail fast
if: inputs.test != '' && inputs.test != matrix.tests.name
run: |
echo "Failing this job as it's not the intended target."
exit 1
- name: Checkout code
uses: actions/checkout@v4
with:
@@ -253,13 +259,14 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
@@ -279,7 +286,6 @@ jobs:
kubectl get pods -A
- name: Run sigs tests
if: inputs.test == '' || inputs.test == matrix.tests.name
run: |
FOCUS="${{ matrix.tests.focus }}"
echo "Running with --focus=${FOCUS}"

.github/workflows/test-e2e.yaml (new file, 184 lines)

@@ -0,0 +1,184 @@
name: Tests E2E
on:
push:
pull_request:
workflow_dispatch:
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
tests-e2e:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
- name: Run e2e tests
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make E2E_LABEL_FILTER="e2e && !slow" test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log
tests-e2e-slow:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
- name: Run e2e tests
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make E2E_LABEL_FILTER="e2e && slow" test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log


@@ -11,7 +11,7 @@ permissions:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -21,12 +21,12 @@ jobs:
go-version-file: go.mod
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
with:
args: --timeout=5m
version: v1.64
version: v2.3.0
tests:
validate:
runs-on: ubuntu-latest
steps:
@@ -36,15 +36,35 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
tests:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Run unit tests
run: make test-unit
tests-e2e:
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: unit
tests-cli:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
@@ -56,13 +76,18 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Build and package
run: |
make build
@@ -74,19 +99,32 @@ jobs:
- name: Check k3kcli
run: k3kcli -v
- name: Run e2e tests
run: make test-e2e
- name: Run cli tests
env:
K3K_DOCKER_INSTALL: "true"
K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
run: make test-cli
- name: Convert coverage data
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{ github.workspace }}/covdata/cover.out
flags: cli
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3s-logs
name: cli-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3k-logs
name: cli-k3k-logs
path: /tmp/k3k.log

.gitignore (3 lines changed)

@@ -8,3 +8,6 @@
__debug*
*-kubeconfig.yaml
.envtest
cover.out
covcounters.**
covmeta.**


@@ -1,13 +1,27 @@
version: "2"
linters:
enable:
# default linters
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
- misspell
- wsl_v5
# extra
- misspell
- wsl
formatters:
enable:
- gci
- gofmt
- gofumpt
settings:
gci:
# The default order is `standard > default > custom > blank > dot > alias > localmodule`.
custom-order: true
sections:
- standard
- default
- alias
- localmodule
- dot
- blank
gofmt:
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'


@@ -1,21 +1,26 @@
REPO ?= rancher
COVERAGE ?= false
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
## Dependencies
GOLANGCI_LINT_VERSION := v1.64.8
GOLANGCI_LINT_VERSION := v2.3.0
GINKGO_VERSION ?= v2.21.0
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
E2E_LABEL_FILTER ?= "e2e"
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
@@ -28,7 +33,7 @@ version: ## Print the current version
.PHONY: build
build: ## Build the the K3k binaries (k3k, k3k-kubelet and k3kcli)
@VERSION=$(VERSION) ./scripts/build
@VERSION=$(VERSION) COVERAGE=$(COVERAGE) ./scripts/build
.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images
@@ -51,19 +56,27 @@ push-%:
.PHONY: test
test: ## Run all the tests
$(GINKGO) -v -r --label-filter=$(label-filter)
$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)
.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
$(GINKGO) -v -r --skip-file=tests/*
$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*
.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) -v -r pkg/controller
$(GINKGO) $(GINKGO_FLAGS) pkg/controller
.PHONY: test-kubelet-controller
test-kubelet-controller: ## Run the kubelet controller tests (k3k-kubelet/controller)
$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) -v -r tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
.PHONY: test-cli
test-cli: ## Run the cli tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
.PHONY: generate
generate: ## Generate the CRDs specs
@@ -73,7 +86,7 @@ generate: ## Generate the CRDs specs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1alpha1 \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
@@ -81,8 +94,12 @@ docs: ## Build the CRDs and CLI docs
lint: ## Find any linting issues in the project
$(GOLANGCI_LINT) run --timeout=5m
.PHONY: fmt
fmt: ## Format the source files in the project
$(GOLANGCI_LINT) fmt ./...
.PHONY: validate
validate: generate docs ## Validate the project checking for any dependency or doc mismatch
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
$(GINKGO) unfocus
go mod tidy
git status --porcelain
@@ -91,10 +108,12 @@ validate: generate docs ## Validate the project checking for any dependency or d
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set image.repository=$(REPO)/k3k \
--set image.tag=$(VERSION) \
--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
--set sharedAgent.image.tag=$(VERSION) \
--set controller.extraEnv[0].name=DEBUG \
--set-string controller.extraEnv[0].value=true \
--set controller.image.repository=$(REPO)/k3k \
--set controller.image.tag=$(VERSION) \
--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
--set agent.shared.image.tag=$(VERSION) \
k3k ./charts/k3k/
.PHONY: help


@@ -3,7 +3,8 @@
[![Experimental](https://img.shields.io/badge/status-experimental-orange.svg)](https://shields.io/)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3k)](https://goreportcard.com/report/github.com/rancher/k3k)
![Tests](https://github.com/rancher/k3k/actions/workflows/test.yaml/badge.svg)
![Build](https://github.com/rancher/k3k/actions/workflows/build.yml/badge.svg)
![Build](https://github.com/rancher/k3k/actions/workflows/build.yml/badge.svg)
[![Conformance Tests - Virtual Mode](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml/badge.svg)](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml)
K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host. K3k offers both "shared" mode, optimizing resource utilization, and "virtual" mode, providing complete isolation with dedicated K3s server pods. This allows you to access a full Kubernetes experience without the overhead of managing separate physical resources.
@@ -71,7 +72,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.2/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +80,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.2/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.3.2
k3kcli version v0.3.5
```
@@ -135,7 +136,7 @@ You can also directly create a Cluster resource in some namespace, to create a K
```bash
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

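The README hunk above only shows the apiVersion line moving from k3k.io/v1alpha1 to k3k.io/v1beta1 (commit 594c2571c3, #505). For reference, a complete minimal Cluster manifest under the promoted API, modeled on the one used in the conformance workflows earlier in this compare, might look like the following sketch; the name, namespace, and field values are illustrative:

```yaml
# Illustrative sketch based on the manifests shown in the conformance
# workflows above; adjust metadata and spec to your environment.
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  servers: 2
  mode: shared        # "virtual" runs dedicated K3s server pods instead
  tlsSANs:
    - "127.0.0.1"
```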

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.3
appVersion: v0.3.3
version: 1.0.0-rc1
appVersion: v1.0.0-rc1


@@ -18,10 +18,13 @@ spec:
- jsonPath: .spec.mode
name: Mode
type: string
- jsonPath: .status.phase
name: Status
type: string
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -220,6 +223,112 @@ spec:
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
customCAs:
description: CustomCAs specifies the cert/key pairs for custom CA
certificates.
properties:
enabled:
default: true
description: Enabled toggles this feature on or off.
type: boolean
sources:
description: Sources defines the sources for all required custom
CA certificates.
properties:
clientCA:
description: ClientCA specifies the client-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
etcdPeerCA:
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
etcdServerCA:
description: ETCDServerCA specifies the etcd-server-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
requestHeaderCA:
description: RequestHeaderCA specifies the request-header-ca
cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
serverCA:
description: ServerCA specifies the server-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
serviceAccountToken:
description: ServiceAccountToken specifies the service-account-token
key.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
required:
- clientCA
- etcdPeerCA
- etcdServerCA
- requestHeaderCA
- serverCA
- serviceAccountToken
type: object
required:
- enabled
- sources
type: object
expose:
description: |-
Expose specifies options for exposing the API server.
@@ -240,7 +349,7 @@ spec:
use for the Ingress.
type: string
type: object
loadbalancer:
loadBalancer:
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
properties:
@@ -279,6 +388,16 @@ spec:
type: integer
type: object
type: object
x-kubernetes-validations:
- message: ingress, loadbalancer and nodePort are mutually exclusive;
only one can be set
rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
x).size() <= 1'
mirrorHostNodes:
description: |-
MirrorHostNodes controls whether node objects from the host cluster
are mirrored into the virtual cluster.
type: boolean
mode:
allOf:
- enum:
@@ -303,8 +422,6 @@ spec:
In "shared" mode, this also applies to workloads.
type: object
persistence:
default:
type: dynamic
description: |-
Persistence specifies options for persisting etcd data.
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
@@ -316,6 +433,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 2G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
@@ -324,8 +442,6 @@ spec:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
priorityClass:
description: |-
@@ -486,6 +602,124 @@ spec:
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
@@ -524,6 +758,7 @@ spec:
type: object
type: object
status:
default: {}
description: Status reflects the observed state of the Cluster.
properties:
clusterCIDR:
@@ -532,29 +767,83 @@ spec:
clusterDNS:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
conditions:
description: Conditions are the individual conditions for the cluster
set.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
hostVersion:
description: HostVersion is the Kubernetes version of the host node.
type: string
persistence:
description: Persistence specifies options for persisting etcd data.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
kubeletPort:
description: KubeletPort specifies the port used by k3k-kubelet in
shared mode.
type: integer
phase:
default: Unknown
description: Phase is a high-level summary of the cluster's current
lifecycle state.
enum:
- Pending
- Provisioning
- Ready
- Failed
- Terminating
- Unknown
type: string
policyName:
description: PolicyName specifies the virtual cluster policy name
bound to the virtual cluster.
@@ -568,6 +857,10 @@ spec:
items:
type: string
type: array
webhookPort:
description: WebhookPort specifies the port used by webhook in k3k-kubelet
in shared mode.
type: integer
type: object
type: object
served: true

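To tie together the new fields shown in the Cluster CRD diff above (the spec.sync block, the loadbalancer to loadBalancer rename, and the validation making ingress, loadBalancer, and nodePort mutually exclusive), here is a hedged example spec; the selector labels are placeholders, not names used by the project:

```yaml
# Illustrative only: exercises the sync and expose.loadBalancer fields
# declared in the CRD above. Label selectors are placeholders.
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  servers: 1
  expose:
    loadBalancer: {}          # only one of ingress, loadBalancer, nodePort may be set
  sync:
    services:
      enabled: true           # default per the CRD
    secrets:
      enabled: true
      selector:
        sync-to-host: "true"  # placeholder label; an empty selector syncs all secrets
    priorityClasses:
      enabled: false          # disabled by default per the CRD
```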

@@ -20,7 +20,7 @@ spec:
- jsonPath: .spec.allowedMode
name: Mode
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -225,6 +225,124 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
type: object
status:
description: Status reflects the observed state of the VirtualClusterPolicy.

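The VirtualClusterPolicy CRD above gains the same spec.sync block with identical defaults. A minimal hedged sketch, spelling out a few of those defaults explicitly, could be:

```yaml
# Illustrative sketch of the sync block on a VirtualClusterPolicy,
# mirroring the defaults declared in the CRD above.
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: default-policy       # placeholder name
spec:
  sync:
    configMaps:
      enabled: true
    ingresses:
      enabled: false
    persistentVolumeClaims:
      enabled: true
```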

@@ -60,3 +60,54 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Print the image pull secrets in the expected format (an array of objects with one possible field, "name").
*/}}
{{- define "image.pullSecrets" }}
{{- $imagePullSecrets := list }}
{{- range . }}
{{- if kindIs "string" . }}
{{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }}
{{- else }}
{{- $imagePullSecrets = append $imagePullSecrets . }}
{{- end }}
{{- end }}
{{- toYaml $imagePullSecrets }}
{{- end }}
{{- define "controller.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "server.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "agent.virtual.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
{{- define "agent.shared.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

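The image.pullSecrets helper above accepts either plain secret names or {name: ...} objects and normalizes them into a single list, while the per-component registry helpers prefer global.imageRegistry over the image-level registry. A hedged values override exercising both, with placeholder names, might look like:

```yaml
# Illustrative Helm values override; secret and registry names are placeholders.
global:
  imageRegistry: "registry.example.com"  # prepended to every image repository by the *.registry helpers
  imagePullSecrets:
    - my-global-pull-secret              # plain string form
controller:
  replicas: 1
  imagePullSecrets:
    - name: my-controller-pull-secret    # object form
```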

@@ -6,7 +6,7 @@ metadata:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.image.replicaCount }}
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
{{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -15,21 +15,42 @@ spec:
labels:
{{- include "k3k.selectorLabels" . | nindent 8 }}
spec:
imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }}
containers:
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
name: {{ .Chart.Name }}
{{- with .Values.controller.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
args:
- k3k
- --cluster-cidr={{ .Values.host.clusterCIDR }}
- --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }}
- --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }}
- --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}
- --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }}
- --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
- --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
- --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
- --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --agent-image-pull-secret
- {{ .name }}
{{- end }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --server-image-pull-secret
- {{ .name }}
{{- end }}
env:
- name: CLUSTER_CIDR
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
- name: K3S_IMAGE
value: {{ .Values.k3sServer.image.repository }}
- name: K3S_IMAGE_PULL_POLICY
value: {{ .Values.k3sServer.image.pullPolicy }}
- name: CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.controller.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- containerPort: 8080
name: https
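For orientation, a rough sketch of the container image and args this template now produces with the chart's default values; the tag placeholder stands in for the chart appVersion, and flags whose values are left unset render with empty values:

```yaml
# Sketch of the rendered controller container (default values, no registry override)
image: "rancher/k3k:<appVersion>"
args:
  - k3k
  - --cluster-cidr=
  - --k3s-server-image=rancher/k3s
  - --k3s-server-image-pull-policy=
  - --agent-shared-image=rancher/k3k-kubelet:<appVersion>
  - --agent-shared-image-pull-policy=
  - --agent-virtual-image=rancher/k3s
  - --agent-virtual-image-pull-policy=
  - --kubelet-port-range=50000-51000
  - --webhook-port-range=51001-52000
```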


@@ -16,7 +16,7 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
rules:
- apiGroups:
- ""
@@ -30,8 +30,29 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
roleRef:
kind: ClusterRole
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: k3k-priorityclass
rules:
- apiGroups:
- "scheduling.k8s.io"
resources:
- "priorityclasses"
verbs:
- "*"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k3k-priorityclass
roleRef:
kind: ClusterRole
name: k3k-priorityclass
apiGroup: rbac.authorization.k8s.io


@@ -1,18 +1,11 @@
replicaCount: 1
image:
repository: rancher/k3k
tag: ""
pullPolicy: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
global:
# -- Global override for container image registry
imageRegistry: ""
# -- Global override for container image registry pull secrets
imagePullSecrets: []
serviceAccount:
# Specifies whether a service account should be created
@@ -21,14 +14,72 @@ serviceAccount:
# If not set and create is true, a name is generated using the fullname template
name: ""
# configuration related to the shared agent mode in k3k
sharedAgent:
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
controller:
replicas: 1
image:
repository: "rancher/k3k-kubelet"
registry: ""
repository: rancher/k3k
tag: ""
pullPolicy: ""
# image registry configuration related to the k3s server
k3sServer:
imagePullSecrets: []
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
# extraEnv:
# - name: MY_CUSTOM_VAR
# value: "my_custom_value"
# - name: ANOTHER_VAR
# valueFrom:
# secretKeyRef:
# name: my-secret
# key: my-key
extraEnv: []
# resources allows you to set resources limits and requests for CPU and Memory
# resources:
# limits:
# cpu: "200m"
# memory: "200Mi"
# requests:
# cpu: "100m"
# memory: "100Mi"
resources: {}
# configuration related to k3s server component in k3k
server:
imagePullSecrets: []
image:
registry:
repository: "rancher/k3s"
pullPolicy: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""


@@ -1,17 +1,20 @@
package cmds
import (
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
)
func NewClusterCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: []*cli.Command{
NewClusterCreateCmd(appCtx),
NewClusterDeleteCmd(appCtx),
NewClusterListCmd(appCtx),
},
func NewClusterCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "cluster",
Short: "cluster command",
}
cmd.AddCommand(
NewClusterCreateCmd(appCtx),
NewClusterDeleteCmd(appCtx),
NewClusterListCmd(appCtx),
)
return cmd
}


@@ -3,22 +3,28 @@ package cmds
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
)
type CreateConfig struct {
@@ -27,10 +33,10 @@ type CreateConfig struct {
serviceCIDR string
servers int
agents int
serverArgs cli.StringSlice
agentArgs cli.StringSlice
serverEnvs cli.StringSlice
agentEnvs cli.StringSlice
serverArgs []string
agentArgs []string
serverEnvs []string
agentEnvs []string
persistenceType string
storageClassName string
storageRequestSize string
@@ -38,40 +44,42 @@ type CreateConfig struct {
mode string
kubeconfigServerHost string
policy string
mirrorHostNodes bool
customCertsPath string
timeout time.Duration
}
func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
createConfig := &CreateConfig{}
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags, newCreateFlags(createConfig)...)
return &cli.Command{
Name: "create",
Usage: "Create new cluster",
UsageText: "k3kcli cluster create [command options] NAME",
Action: createAction(appCtx, createConfig),
Flags: flags,
HideHelpCommand: true,
cmd := &cobra.Command{
Use: "create",
Short: "Create new cluster",
Example: "k3kcli cluster create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
return validateCreateConfig(createConfig)
},
RunE: createAction(appCtx, createConfig),
Args: cobra.ExactArgs(1),
}
CobraFlagNamespace(appCtx, cmd.Flags())
createFlags(cmd, createConfig)
return cmd
}
func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
return func(clx *cli.Context) error {
func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
name := clx.Args().First()
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
if config.mode == string(v1beta1.SharedClusterMode) && config.agents != 0 {
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
}
@@ -83,7 +91,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
if strings.Contains(config.version, "+") {
orig := config.version
config.version = strings.Replace(config.version, "+", "-", -1)
config.version = strings.ReplaceAll(config.version, "+", "-")
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
}
@@ -97,12 +105,18 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
}
}
if config.customCertsPath != "" {
if err := CreateCustomCertsSecrets(ctx, name, namespace, config.customCertsPath, client); err != nil {
return err
}
}
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
cluster.Spec.Expose = &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
@@ -126,9 +140,13 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
}
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
logrus.Infof("Waiting for cluster to be available..")
logrus.Infof("waiting for cluster to be available..")
if err := waitForCluster(ctx, client, cluster, config.timeout); err != nil {
return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
// retry every 5s for at most 2m, or 25 times
availableBackoff := wait.Backoff{
@@ -142,42 +160,43 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0])
kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0], 0)
return err
}); err != nil {
return err
}
return writeKubeconfigFile(cluster, kubeconfig)
return writeKubeconfigFile(cluster, kubeconfig, "")
}
}
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
cluster := &v1alpha1.Cluster{
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Servers: ptr.To(int32(config.servers)),
Agents: ptr.To(int32(config.agents)),
ClusterCIDR: config.clusterCIDR,
ServiceCIDR: config.serviceCIDR,
ServerArgs: config.serverArgs.Value(),
AgentArgs: config.agentArgs.Value(),
ServerEnvs: env(config.serverEnvs.Value()),
AgentEnvs: env(config.agentEnvs.Value()),
ServerArgs: config.serverArgs,
AgentArgs: config.agentArgs,
ServerEnvs: env(config.serverEnvs),
AgentEnvs: env(config.agentEnvs),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
Mode: v1beta1.ClusterMode(config.mode),
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
MirrorHostNodes: config.mirrorHostNodes,
},
}
if config.storageClassName == "" {
@@ -191,6 +210,32 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
}
}
if config.customCertsPath != "" {
cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
Enabled: true,
Sources: v1beta1.CredentialSources{
ClientCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
},
ServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
},
ETCDServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
},
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
},
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
},
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
},
},
}
}
return cluster
}
@@ -211,3 +256,88 @@ func env(envSlice []string) []v1.EnvVar {
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
interval := 5 * time.Second
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
key := client.ObjectKeyFromObject(cluster)
if err := k8sClient.Get(ctx, key, cluster); err != nil {
return false, fmt.Errorf("failed to get resource: %w", err)
}
// If resource ready -> stop polling
if cluster.Status.Phase == v1beta1.ClusterReady {
return true, nil
}
// If resource failed -> stop polling with an error
if cluster.Status.Phase == v1beta1.ClusterFailed {
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
}
// Condition not met, continue polling.
return false, nil
})
}
func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsPath string, k8sclient client.Client) error {
customCAsMap := map[string]string{
"etcd-peer-ca": "/etcd/peer-ca",
"etcd-server-ca": "/etcd/server-ca",
"server-ca": "/server-ca",
"client-ca": "/client-ca",
"request-header-ca": "/request-header-ca",
"service-account-token": "/service",
}
for certName, fileName := range customCAsMap {
var (
certFilePath, keyFilePath string
cert, key []byte
err error
)
if certName != "service-account-token" {
certFilePath = customCertsPath + fileName + ".crt"
cert, err = os.ReadFile(certFilePath)
if err != nil {
return err
}
}
keyFilePath = customCertsPath + fileName + ".key"
key, err = os.ReadFile(keyFilePath)
if err != nil {
return err
}
certSecret := caCertSecret(certName, name, namespace, cert, key)
if err := k8sclient.Create(ctx, certSecret); err != nil {
return client.IgnoreAlreadyExists(err)
}
}
return nil
}
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
Namespace: clusterNamespace,
},
Type: v1.SecretTypeTLS,
Data: map[string][]byte{
v1.TLSCertKey: cert,
v1.TLSPrivateKeyKey: key,
},
}
}
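Putting `customCAsMap`, `CreateCustomCertsSecrets`, and the `CustomCAs` block from `newCluster` together, the sketch below shows the directory layout `--custom-certs` expects and the spec fragment the CLI ends up setting. The YAML field names assume the usual lowerCamel JSON tags, and the secret names are illustrative; the exact values come from `SafeConcatNameWithPrefix`.

```yaml
# Expected layout under --custom-certs=<dir> (sketch):
#   <dir>/client-ca.crt, client-ca.key
#   <dir>/server-ca.crt, server-ca.key
#   <dir>/request-header-ca.crt, request-header-ca.key
#   <dir>/etcd/peer-ca.crt, peer-ca.key
#   <dir>/etcd/server-ca.crt, server-ca.key
#   <dir>/service.key                 # service-account token key only, no .crt

# Spec fragment set on the created Cluster (illustrative secret names):
spec:
  customCAs:
    enabled: true
    sources:
      clientCA:
        secretName: mycluster-client-ca
      serverCA:
        secretName: mycluster-server-ca
      etcdServerCA:
        secretName: mycluster-etcd-server-ca
      etcdPeerCA:
        secretName: mycluster-etcd-peer-ca
      requestHeaderCA:
        secretName: mycluster-request-header-ca
      serviceAccountToken:
        secretName: mycluster-service-account-token
```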


@@ -2,124 +2,62 @@ package cmds
import (
"errors"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func newCreateFlags(config *CreateConfig) []cli.Flag {
return []cli.Flag{
&cli.IntFlag{
Name: "servers",
Usage: "number of servers",
Destination: &config.servers,
Value: 1,
Action: func(ctx *cli.Context, value int) error {
if value <= 0 {
return errors.New("invalid number of servers")
}
return nil
},
},
&cli.IntFlag{
Name: "agents",
Usage: "number of agents",
Destination: &config.agents,
},
&cli.StringFlag{
Name: "token",
Usage: "token of the cluster",
Destination: &config.token,
},
&cli.StringFlag{
Name: "cluster-cidr",
Usage: "cluster CIDR",
Destination: &config.clusterCIDR,
},
&cli.StringFlag{
Name: "service-cidr",
Usage: "service CIDR",
Destination: &config.serviceCIDR,
},
&cli.StringFlag{
Name: "persistence-type",
Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
Value: string(v1alpha1.DynamicPersistenceMode),
Destination: &config.persistenceType,
Action: func(ctx *cli.Context, value string) error {
switch v1alpha1.PersistenceMode(value) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
},
},
&cli.StringFlag{
Name: "storage-class-name",
Usage: "storage class name for dynamic persistence type",
Destination: &config.storageClassName,
},
&cli.StringFlag{
Name: "storage-request-size",
Usage: "storage size for dynamic persistence type",
Destination: &config.storageRequestSize,
Action: func(ctx *cli.Context, value string) error {
if _, err := resource.ParseQuantity(value); err != nil {
return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
}
return nil
},
},
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Destination: &config.serverArgs,
},
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Destination: &config.agentArgs,
},
&cli.StringSliceFlag{
Name: "server-envs",
Usage: "servers extra Envs",
Destination: &config.serverEnvs,
},
&cli.StringSliceFlag{
Name: "agent-envs",
Usage: "agents extra Envs",
Destination: &config.agentEnvs,
},
&cli.StringFlag{
Name: "version",
Usage: "k3s version",
Destination: &config.version,
},
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type (shared, virtual)",
Destination: &config.mode,
Value: "shared",
Action: func(ctx *cli.Context, value string) error {
switch value {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
},
},
&cli.StringFlag{
Name: "kubeconfig-server",
Usage: "override the kubeconfig server host",
Destination: &config.kubeconfigServerHost,
},
&cli.StringFlag{
Name: "policy",
Usage: "The policy to create the cluster in",
Destination: &config.policy,
},
}
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().IntVar(&cfg.servers, "servers", 1, "number of servers")
cmd.Flags().IntVar(&cfg.agents, "agents", 0, "number of agents")
cmd.Flags().StringVar(&cfg.token, "token", "", "token of the cluster")
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments")
cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs")
cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs")
cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)")
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
cmd.Flags().DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h).")
}
func validateCreateConfig(cfg *CreateConfig) error {
if cfg.servers <= 0 {
return errors.New("invalid number of servers")
}
if cfg.persistenceType != "" {
switch v1beta1.PersistenceMode(cfg.persistenceType) {
case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
}
if _, err := resource.ParseQuantity(cfg.storageRequestSize); err != nil {
return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
}
if cfg.mode != "" {
switch cfg.mode {
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
}
return nil
}


@@ -4,52 +4,44 @@ import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
var keepData bool
func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags,
&cli.BoolFlag{
Name: "keep-data",
Usage: "keeps persistence volumes created for the cluster after deletion",
Destination: &keepData,
},
)
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
UsageText: "k3kcli cluster delete [command options] NAME",
Action: delete(appCtx),
Flags: flags,
HideHelpCommand: true,
func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "delete",
Short: "Delete an existing cluster",
Example: "k3kcli cluster delete [command options] NAME",
RunE: delete(appCtx),
Args: cobra.ExactArgs(1),
}
CobraFlagNamespace(appCtx, cmd.Flags())
cmd.Flags().BoolVar(&keepData, "keep-data", false, "keeps persistence volumes created for the cluster after deletion")
return cmd
}
func delete(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
name := clx.Args().First()
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
@@ -58,7 +50,7 @@ func delete(appCtx *AppContext) cli.ActionFunc {
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
cluster := v1alpha1.Cluster{
cluster := v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
@@ -94,7 +86,7 @@ func delete(appCtx *AppContext) cli.ActionFunc {
}
}
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error {
var secret v1.Secret
key := types.NamespacedName{


@@ -3,38 +3,36 @@ package cmds
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/urfave/cli/v2"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/printers"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewClusterListCmd(appCtx *AppContext) *cli.Command {
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
return &cli.Command{
Name: "list",
Usage: "List all the existing cluster",
UsageText: "k3kcli cluster list [command options]",
Action: list(appCtx),
Flags: flags,
HideHelpCommand: true,
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "List all the existing cluster",
Example: "k3kcli cluster list [command options]",
RunE: list(appCtx),
Args: cobra.NoArgs,
}
CobraFlagNamespace(appCtx, cmd.Flags())
return cmd
}
func list(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
if clx.NArg() > 0 {
return cli.ShowSubcommandHelp(clx)
}
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
return err
}
@@ -49,6 +47,6 @@ func list(appCtx *AppContext) cli.ActionFunc {
printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true})
return printer.PrintObj(table, clx.App.Writer)
return printer.PrintObj(table, cmd.OutOrStdout())
}
}


@@ -8,108 +8,82 @@ import (
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
)
var (
type GenerateKubeconfigConfig struct {
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
cn string
org []string
altNames []string
expirationDays int64
kubeconfigServerHost string
)
func newGenerateKubeconfigFlags(appCtx *AppContext) []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name: "name",
Usage: "cluster name",
Destination: &name,
},
&cli.StringFlag{
Name: "config-name",
Usage: "the name of the generated kubeconfig file",
Destination: &configName,
},
&cli.StringFlag{
Name: "cn",
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
Destination: &cn,
Value: controller.AdminCommonName,
},
&cli.StringSliceFlag{
Name: "org",
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
Value: &org,
},
&cli.StringSliceFlag{
Name: "altNames",
Usage: "altNames of the generated certificates for the kubeconfig",
Value: &altNames,
},
&cli.Int64Flag{
Name: "expiration-days",
Usage: "Expiration date of the certificates used for the kubeconfig",
Destination: &expirationDays,
Value: 356,
},
&cli.StringFlag{
Name: "kubeconfig-server",
Usage: "override the kubeconfig server host",
Destination: &kubeconfigServerHost,
Value: "",
},
}
}
func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: []*cli.Command{
NewKubeconfigGenerateCmd(appCtx),
},
func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "kubeconfig",
Short: "Manage kubeconfig for clusters",
}
cmd.AddCommand(
NewKubeconfigGenerateCmd(appCtx),
)
return cmd
}
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
flags := CommonFlags(appCtx)
flags = append(flags, FlagNamespace(appCtx))
flags = append(flags, newGenerateKubeconfigFlags(appCtx)...)
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
cfg := &GenerateKubeconfigConfig{}
return &cli.Command{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate(appCtx),
Flags: flags,
cmd := &cobra.Command{
Use: "generate",
Short: "Generate kubeconfig for clusters",
RunE: generate(appCtx, cfg),
Args: cobra.NoArgs,
}
CobraFlagNamespace(appCtx, cmd.Flags())
generateKubeconfigFlags(cmd, cfg)
return cmd
}
func generate(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
func generateKubeconfigFlags(cmd *cobra.Command, cfg *GenerateKubeconfigConfig) {
cmd.Flags().StringVar(&cfg.name, "name", "", "cluster name")
cmd.Flags().StringVar(&cfg.configName, "config-name", "", "the name of the generated kubeconfig file")
cmd.Flags().StringVar(&cfg.cn, "cn", controller.AdminCommonName, "Common name (CN) of the generated certificates for the kubeconfig")
cmd.Flags().StringSliceVar(&cfg.org, "org", nil, "Organization name (ORG) of the generated certificates for the kubeconfig")
cmd.Flags().StringSliceVar(&cfg.altNames, "altNames", nil, "altNames of the generated certificates for the kubeconfig")
cmd.Flags().Int64Var(&cfg.expirationDays, "expiration-days", 365, "Expiration date of the certificates used for the kubeconfig")
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
}
func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
clusterKey := types.NamespacedName{
Name: name,
Namespace: appCtx.Namespace(name),
Name: cfg.name,
Namespace: appCtx.Namespace(cfg.name),
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
@@ -121,25 +95,21 @@ func generate(appCtx *AppContext) cli.ActionFunc {
}
host := strings.Split(url.Host, ":")
if kubeconfigServerHost != "" {
host = []string{kubeconfigServerHost}
if err := altNames.Set(kubeconfigServerHost); err != nil {
return err
}
if cfg.kubeconfigServerHost != "" {
host = []string{cfg.kubeconfigServerHost}
cfg.altNames = append(cfg.altNames, cfg.kubeconfigServerHost)
}
certAltNames := certs.AddSANs(altNames.Value())
certAltNames := certs.AddSANs(cfg.altNames)
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
if len(cfg.org) == 0 {
cfg.org = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
kubeCfg := kubeconfig.KubeConfig{
CN: cfg.cn,
ORG: cfg.org,
ExpiryDate: time.Hour * 24 * time.Duration(cfg.expirationDays),
AltNames: certAltNames,
}
@@ -148,17 +118,17 @@ func generate(appCtx *AppContext) cli.ActionFunc {
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Generate(ctx, client, &cluster, host[0])
kubeconfig, err = kubeCfg.Generate(ctx, client, &cluster, host[0], 0)
return err
}); err != nil {
return err
}
return writeKubeconfigFile(&cluster, kubeconfig)
return writeKubeconfigFile(&cluster, kubeconfig, cfg.configName)
}
}
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}
@@ -179,5 +149,5 @@ func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Con
return err
}
return os.WriteFile(configName, kubeconfigData, 0644)
return os.WriteFile(configName, kubeconfigData, 0o644)
}


@@ -1,17 +1,20 @@
package cmds
import (
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
)
func NewPolicyCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "policy",
Usage: "policy command",
Subcommands: []*cli.Command{
NewPolicyCreateCmd(appCtx),
NewPolicyDeleteCmd(appCtx),
NewPolicyListCmd(appCtx),
},
func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "policy",
Short: "policy command",
}
cmd.AddCommand(
NewPolicyCreateCmd(appCtx),
NewPolicyDeleteCmd(appCtx),
NewPolicyListCmd(appCtx),
)
return cmd
}


@@ -4,64 +4,54 @@ import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
)
type VirtualClusterPolicyCreateConfig struct {
mode string
}
func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command {
func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
config := &VirtualClusterPolicyCreateConfig{}
flags := CommonFlags(appCtx)
flags = append(flags,
&cli.StringFlag{
Name: "mode",
Usage: "The allowed mode type of the policy",
Destination: &config.mode,
Value: "shared",
Action: func(ctx *cli.Context, value string) error {
switch value {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
},
cmd := &cobra.Command{
Use: "create",
Short: "Create new policy",
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
},
)
return &cli.Command{
Name: "create",
Usage: "Create new policy",
UsageText: "k3kcli policy create [command options] NAME",
Action: policyCreateAction(appCtx, config),
Flags: flags,
HideHelpCommand: true,
RunE: policyCreateAction(appCtx, config),
Args: cobra.ExactArgs(1),
}
cmd.Flags().StringVar(&config.mode, "mode", "shared", "The allowed mode type of the policy")
return cmd
}
func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) cli.ActionFunc {
return func(clx *cli.Context) error {
func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
policyName := args[0]
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
policyName := clx.Args().First()
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)
return err
}
@@ -91,18 +81,18 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: mode,
},
}
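The manifest equivalent of the policy this creates, as a sketch (assuming the spec field serializes as `allowedMode`):

```yaml
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: my-policy
spec:
  allowedMode: shared   # or "virtual"
```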


@@ -3,35 +3,31 @@ package cmds
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "delete",
Usage: "Delete an existing policy",
UsageText: "k3kcli policy delete [command options] NAME",
Action: policyDeleteAction(appCtx),
Flags: CommonFlags(appCtx),
HideHelpCommand: true,
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "delete",
Short: "Delete an existing policy",
Example: "k3kcli policy delete [command options] NAME",
RunE: policyDeleteAction(appCtx),
Args: cobra.ExactArgs(1),
}
}
func policyDeleteAction(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
name := clx.Args().First()
policy := &v1alpha1.VirtualClusterPolicy{}
policy := &v1beta1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {


@@ -3,34 +3,31 @@ package cmds
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/urfave/cli/v2"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/printers"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyListCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "list",
Usage: "List all the existing policies",
UsageText: "k3kcli policy list [command options]",
Action: policyList(appCtx),
Flags: CommonFlags(appCtx),
HideHelpCommand: true,
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "list",
Short: "List all the existing policies",
Example: "k3kcli policy list [command options]",
RunE: policyList(appCtx),
Args: cobra.NoArgs,
}
}
func policyList(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
if clx.NArg() > 0 {
return cli.ShowSubcommandHelp(clx)
}
var policies v1alpha1.VirtualClusterPolicyList
var policies v1beta1.VirtualClusterPolicyList
if err := client.List(ctx, &policies); err != nil {
return err
}
@@ -45,6 +42,6 @@ func policyList(appCtx *AppContext) cli.ActionFunc {
printer := printers.NewTablePrinter(printers.PrintOptions{})
return printer.PrintObj(table, clx.App.Writer)
return printer.PrintObj(table, cmd.OutOrStdout())
}
}


@@ -2,17 +2,22 @@ package cmds
import (
"fmt"
"strings"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
)
type AppContext struct {
@@ -25,52 +30,53 @@ type AppContext struct {
namespace string
}
func NewApp() *cli.App {
func NewRootCmd() *cobra.Command {
appCtx := &AppContext{}
app := cli.NewApp()
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = CommonFlags(appCtx)
rootCmd := &cobra.Command{
Use: "k3kcli",
Short: "CLI for K3K",
Version: buildinfo.Version,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
InitializeConfig(cmd)
app.Before = func(clx *cli.Context) error {
if appCtx.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
if appCtx.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
if err != nil {
return err
}
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
if err != nil {
return err
}
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
if err != nil {
return err
}
appCtx.RestConfig = restConfig
appCtx.Client = ctrlClient
appCtx.RestConfig = restConfig
appCtx.Client = ctrlClient
return nil
return nil
},
DisableAutoGenTag: true,
}
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
rootCmd.PersistentFlags().StringVar(&appCtx.Kubeconfig, "kubeconfig", "", "kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)")
rootCmd.PersistentFlags().BoolVar(&appCtx.Debug, "debug", false, "Turn on debug logs")
app.Commands = []*cli.Command{
rootCmd.AddCommand(
NewClusterCmd(appCtx),
NewPolicyCmd(appCtx),
NewKubeconfigCmd(appCtx),
}
)
return app
return rootCmd
}
func (ctx *AppContext) Namespace(name string) string {
@@ -94,36 +100,20 @@ func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
return kubeConfig.ClientConfig()
}
func CommonFlags(appCtx *AppContext) []cli.Flag {
return []cli.Flag{
FlagDebug(appCtx),
FlagKubeconfig(appCtx),
}
func CobraFlagNamespace(appCtx *AppContext, flag *pflag.FlagSet) {
flag.StringVarP(&appCtx.namespace, "namespace", "n", "", "namespace of the k3k cluster")
}
func FlagDebug(appCtx *AppContext) *cli.BoolFlag {
return &cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &appCtx.Debug,
EnvVars: []string{"K3K_DEBUG"},
}
}
func InitializeConfig(cmd *cobra.Command) {
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
func FlagKubeconfig(appCtx *AppContext) *cli.StringFlag {
return &cli.StringFlag{
Name: "kubeconfig",
Usage: "kubeconfig path",
Destination: &appCtx.Kubeconfig,
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
}
}
func FlagNamespace(appCtx *AppContext) *cli.StringFlag {
return &cli.StringFlag{
Name: "namespace",
Usage: "namespace of the k3k cluster",
Aliases: []string{"n"},
Destination: &appCtx.namespace,
}
// Bind the current command's flags to viper
cmd.Flags().VisitAll(func(f *pflag.Flag) {
// Apply the viper config value to the flag when the flag is not set and viper has a value
if !f.Changed && viper.IsSet(f.Name) {
val := viper.Get(f.Name)
_ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
}
})
}


@@ -1,10 +1,11 @@
package cmds
import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/jsonpath"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning
@@ -24,7 +25,7 @@ func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []a
}
for _, version := range crd.Spec.Versions {
if version.Name == "v1alpha1" {
if version.Name == "v1beta1" {
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
break
}
@@ -93,7 +94,7 @@ func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.Custo
}
func toPointerSlice[T any](v []T) []*T {
var vPtr = make([]*T, len(v))
vPtr := make([]*T, len(v))
for i := range v {
vPtr[i] = &v[i]


@@ -1,15 +1,14 @@
package main
import (
"os"
"github.com/sirupsen/logrus"
"github.com/rancher/k3k/cli/cmds"
"github.com/sirupsen/logrus"
)
func main() {
app := cmds.NewApp()
if err := app.Run(os.Args); err != nil {
app := cmds.NewRootCmd()
if err := app.Execute(); err != nil {
logrus.Fatal(err)
}
}


@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us
The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.
@@ -22,7 +22,7 @@ This example creates a "shared" mode K3k cluster with:
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: my-virtual-cluster


@@ -1,165 +0,0 @@
# NAME
k3kcli - CLI for K3K
# SYNOPSIS
k3kcli
```
[--debug]
[--kubeconfig]=[value]
```
**Usage**:
```
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
```
# GLOBAL OPTIONS
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
# COMMANDS
## cluster
cluster command
### create
Create new cluster
>k3kcli cluster create [command options] NAME
**--agent-args**="": agents extra arguments
**--agent-envs**="": agents extra Envs
**--agents**="": number of agents (default: 0)
**--cluster-cidr**="": cluster CIDR
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--mode**="": k3k mode type (shared, virtual) (default: "shared")
**--namespace, -n**="": namespace of the k3k cluster
**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")
**--policy**="": The policy to create the cluster in
**--server-args**="": servers extra arguments
**--server-envs**="": servers extra Envs
**--servers**="": number of servers (default: 1)
**--service-cidr**="": service CIDR
**--storage-class-name**="": storage class name for dynamic persistence type
**--storage-request-size**="": storage size for dynamic persistence type
**--token**="": token of the cluster
**--version**="": k3s version
### delete
Delete an existing cluster
>k3kcli cluster delete [command options] NAME
**--debug**: Turn on debug logs
**--keep-data**: keeps persistence volumes created for the cluster after deletion
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace of the k3k cluster
### list
List all the existing cluster
>k3kcli cluster list [command options]
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace, -n**="": namespace of the k3k cluster
## policy
policy command
### create
Create new policy
>k3kcli policy create [command options] NAME
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--mode**="": The allowed mode type of the policy (default: "shared")
### delete
Delete an existing policy
>k3kcli policy delete [command options] NAME
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
### list
List all the existing policies
>k3kcli policy list [command options]
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
## kubeconfig
Manage kubeconfig for clusters
### generate
Generate kubeconfig for clusters
**--altNames**="": altNames of the generated certificates for the kubeconfig
**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")
**--config-name**="": the name of the generated kubeconfig file
**--debug**: Turn on debug logs
**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--name**="": cluster name
**--namespace, -n**="": namespace of the k3k cluster
**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig


@@ -5,19 +5,14 @@ import (
"os"
"path"
"github.com/spf13/cobra/doc"
"github.com/rancher/k3k/cli/cmds"
)
func main() {
// Instantiate the CLI application
app := cmds.NewApp()
// Generate the Markdown documentation
md, err := app.ToMarkdown()
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
k3kcli := cmds.NewRootCmd()
wd, err := os.Getwd()
if err != nil {
@@ -25,13 +20,12 @@ func main() {
os.Exit(1)
}
outputFile := path.Join(wd, "docs/cli/cli-docs.md")
outputDir := path.Join(wd, "docs/cli")
err = os.WriteFile(outputFile, []byte(md), 0644)
if err != nil {
if err := doc.GenMarkdownTree(k3kcli, outputDir); err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
fmt.Println("Documentation generated at " + outputFile)
fmt.Println("Documentation generated at " + outputDir)
}

docs/cli/k3kcli.md (new file)

@@ -0,0 +1,18 @@
## k3kcli
CLI for K3K
### Options
```
--debug Turn on debug logs
-h, --help help for k3kcli
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,24 @@
## k3kcli cluster
cluster command
### Options
```
-h, --help help for cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing cluster


@@ -0,0 +1,51 @@
## k3kcli cluster create
Create new cluster
```
k3kcli cluster create [flags]
```
### Examples
```
k3kcli cluster create [command options] NAME
```
### Options
```
--agent-args strings agents extra arguments
--agent-envs strings agents extra Envs
--agents int number of agents
--cluster-cidr string cluster CIDR
--custom-certs string The path for custom certificate directory
-h, --help help for create
--kubeconfig-server string override the kubeconfig server host
--mirror-host-nodes Mirror Host Cluster Nodes
--mode string k3k mode type (shared, virtual) (default "shared")
-n, --namespace string namespace of the k3k cluster
--persistence-type string persistence mode for the nodes (dynamic, ephemeral, static) (default "dynamic")
--policy string The policy to create the cluster in
--server-args strings servers extra arguments
--server-envs strings servers extra Envs
--servers int number of servers (default 1)
--service-cidr string service CIDR
--storage-class-name string storage class name for dynamic persistence type
--storage-request-size string storage size for dynamic persistence type
--timeout duration The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
--token string token of the cluster
--version string k3s version
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,33 @@
## k3kcli cluster delete
Delete an existing cluster
```
k3kcli cluster delete [flags]
```
### Examples
```
k3kcli cluster delete [command options] NAME
```
### Options
```
-h, --help help for delete
--keep-data keeps persistence volumes created for the cluster after deletion
-n, --namespace string namespace of the k3k cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,32 @@
## k3kcli cluster list
List all the existing cluster
```
k3kcli cluster list [flags]
```
### Examples
```
k3kcli cluster list [command options]
```
### Options
```
-h, --help help for list
-n, --namespace string namespace of the k3k cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,22 @@
## k3kcli kubeconfig
Manage kubeconfig for clusters
### Options
```
-h, --help help for kubeconfig
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters


@@ -0,0 +1,33 @@
## k3kcli kubeconfig generate
Generate kubeconfig for clusters
```
k3kcli kubeconfig generate [flags]
```
### Options
```
--altNames strings altNames of the generated certificates for the kubeconfig
--cn string Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
--config-name string the name of the generated kubeconfig file
--expiration-days int Expiration date of the certificates used for the kubeconfig (default 365)
-h, --help help for generate
--kubeconfig-server string override the kubeconfig server host
--name string cluster name
-n, --namespace string namespace of the k3k cluster
--org strings Organization name (ORG) of the generated certificates for the kubeconfig
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters

docs/cli/k3kcli_policy.md (new file)

@@ -0,0 +1,24 @@
## k3kcli policy
policy command
### Options
```
-h, --help help for policy
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies


@@ -0,0 +1,32 @@
## k3kcli policy create
Create new policy
```
k3kcli policy create [flags]
```
### Examples
```
k3kcli policy create [command options] NAME
```
### Options
```
-h, --help help for create
--mode string The allowed mode type of the policy (default "shared")
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,31 @@
## k3kcli policy delete
Delete an existing policy
```
k3kcli policy delete [flags]
```
### Examples
```
k3kcli policy delete [command options] NAME
```
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,31 @@
## k3kcli policy list
List all the existing policies
```
k3kcli policy list [flags]
```
### Examples
```
k3kcli policy list [command options]
```
### Options
```
-h, --help help for list
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -1,10 +1,10 @@
# API Reference
## Packages
- [k3k.io/v1alpha1](#k3kiov1alpha1)
- [k3k.io/v1beta1](#k3kiov1beta1)
## k3k.io/v1alpha1
## k3k.io/v1beta1
### Resource Types
@@ -47,7 +47,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
@@ -65,7 +65,7 @@ ClusterList is a list of Cluster resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `ClusterList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[Cluster](#cluster) array_ | | | |
@@ -86,6 +86,19 @@ _Appears in:_
#### ClusterPhase
_Underlying type:_ _string_
ClusterPhase is a high-level summary of the cluster's current lifecycle state.
_Appears in:_
- [ClusterStatus](#clusterstatus)
#### ClusterSpec
@@ -106,7 +119,7 @@ _Appears in:_
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
@@ -119,10 +132,86 @@ _Appears in:_
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resource types that will be synced from the virtual cluster to the host cluster. | \{ \} | |
#### ConfigMapSyncConfig
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### CredentialSource
CredentialSource defines where to get a credential from.
It can represent either a TLS key pair or a single private key.
_Appears in:_
- [CredentialSources](#credentialsources)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |
#### CredentialSources
CredentialSources lists all the required credentials, including both
TLS key pairs and single signing keys.
_Appears in:_
- [CustomCAs](#customcas)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverCA` _[CredentialSource](#credentialsource)_ | ServerCA specifies the server-ca cert/key pair. | | |
| `clientCA` _[CredentialSource](#credentialsource)_ | ClientCA specifies the client-ca cert/key pair. | | |
| `requestHeaderCA` _[CredentialSource](#credentialsource)_ | RequestHeaderCA specifies the request-header-ca cert/key pair. | | |
| `etcdServerCA` _[CredentialSource](#credentialsource)_ | ETCDServerCA specifies the etcd-server-ca cert/key pair. | | |
| `etcdPeerCA` _[CredentialSource](#credentialsource)_ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. | | |
| `serviceAccountToken` _[CredentialSource](#credentialsource)_ | ServiceAccountToken specifies the service-account-token key. | | |
#### CustomCAs
CustomCAs specifies the cert/key pairs for custom CA certificates.
_Appears in:_
- [ClusterSpec](#clusterspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled toggles this feature on or off. | true | |
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
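For illustration, a minimal sketch of how these fields might be combined in a Cluster manifest, assuming the referenced Secrets already exist in the cluster namespace (all Secret names below are hypothetical):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: custom-ca-cluster        # hypothetical name
spec:
  customCAs:
    enabled: true
    sources:
      serverCA:
        secretName: my-server-ca           # expects 'tls.crt' and 'tls.key'
      clientCA:
        secretName: my-client-ca
      requestHeaderCA:
        secretName: my-request-header-ca
      etcdServerCA:
        secretName: my-etcd-server-ca
      etcdPeerCA:
        secretName: my-etcd-peer-ca
      serviceAccountToken:
        secretName: my-sa-token             # expects 'tls.key' only
```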
#### ExposeConfig
@@ -137,7 +226,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
| `loadBalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
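As a hedged sketch, the renamed field would appear in a Cluster manifest as follows (whether additional LoadBalancerConfig fields are required depends on defaults not shown in this hunk):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: exposed-cluster          # hypothetical name
spec:
  expose:
    loadBalancer: {}             # spelled `loadbalancer` in v1alpha1
```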
@@ -158,6 +247,23 @@ _Appears in:_
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
#### IngressSyncConfig
IngressSyncConfig specifies the sync options for Ingresses.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### LoadBalancerConfig
@@ -202,13 +308,12 @@ PersistenceConfig specifies options for persisting etcd data.
_Appears in:_
- [ClusterSpec](#clusterspec)
- [ClusterStatus](#clusterstatus)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
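A minimal sketch of these fields in a Cluster manifest, assuming a `local-path` StorageClass exists on the host (the class name is only an example):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: persistent-cluster       # hypothetical name
spec:
  persistence:
    type: dynamic                # the default mode
    storageClassName: local-path # only relevant in "dynamic" mode
    storageRequestSize: 2G       # matches the documented default
```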
#### PersistenceMode
@@ -224,6 +329,23 @@ _Appears in:_
#### PersistentVolumeClaimSyncConfig
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### PodSecurityAdmissionLevel
_Underlying type:_ _string_
@@ -238,6 +360,79 @@ _Appears in:_
#### PriorityClassSyncConfig
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SecretSyncConfig
SecretSyncConfig specifies the sync options for Secrets.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### ServiceSyncConfig
ServiceSyncConfig specifies the sync options for services.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SyncConfig
SyncConfig specifies the resource types that should be synced from the virtual cluster to the host cluster.
_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
| `configMaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
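A hedged sketch of a Cluster `sync` block that spells out the documented defaults and narrows ConfigMap syncing with a selector (the `app: demo` label is hypothetical):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: synced-cluster           # hypothetical name
spec:
  sync:
    services:
      enabled: true
    configMaps:
      enabled: true
      selector:
        app: demo                # only ConfigMaps carrying this label are synced
    secrets:
      enabled: true
    ingresses:
      enabled: false             # documented default
    persistentVolumeClaims:
      enabled: true
    priorityClasses:
      enabled: false             # documented default
```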
#### VirtualClusterPolicy
@@ -252,7 +447,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
@@ -270,7 +465,7 @@ VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |
@@ -296,6 +491,7 @@ _Appears in:_
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resource types that will be synced from the virtual cluster to the host cluster. | \{ \} | |
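A minimal sketch of a policy that combines these fields, including the newly documented `sync` block (the policy name is hypothetical):
```yaml
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: shared-sync-policy       # hypothetical name
spec:
  allowedMode: shared
  podSecurityAdmissionLevel: baseline
  sync:
    ingresses:
      enabled: true              # opt in, since ingress syncing defaults to false
```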


@@ -41,6 +41,7 @@ To see all the available Make commands you can run `make help`, i.e:
test Run all the tests
test-unit Run the unit tests (skips the e2e)
test-controller Run the controller tests (pkg/controller)
test-kubelet-controller Run the kubelet controller tests (k3k-kubelet/controller)
test-e2e Run the e2e tests
generate Generate the CRDs specs
docs Build the CRDs and CLI docs
@@ -129,7 +130,7 @@ Create then the virtual cluster exposing through NodePort one of the ports that
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster


@@ -32,18 +32,27 @@ Load these images into your internal (air-gapped) registry.
Update the `values.yaml` file in the K3k Helm chart with air gap settings:
```yaml
image:
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
sharedAgent:
controller:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
k3sServer:
agent:
imagePullSecrets: []
virtual:
image:
repository: rancher/k3s
pullPolicy: "" # Optional
shared:
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
server:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3s
pullPolicy: "" # Optional


@@ -17,7 +17,7 @@ This guide walks through the various ways to create and manage virtual clusters
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ingress
@@ -46,7 +46,7 @@ This will create a virtual cluster in `shared` mode and expose it via an ingress
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-persistent
@@ -80,7 +80,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ha
@@ -105,7 +105,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-virtual
@@ -136,7 +136,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
@@ -162,7 +162,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
@@ -189,7 +189,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-resourced
@@ -216,7 +216,7 @@ This configures the CPU and memory limit for the virtual cluster.
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-node-placed
@@ -259,7 +259,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-http-proxy


@@ -0,0 +1,147 @@
# Troubleshooting
This guide walks through common troubleshooting steps for working with K3K virtual clusters.
---
## `too many open files` error
The `k3k-kubelet` or `k3kcluster-server-` pods may run into the following issue:
```sh
E0604 13:14:53.369369 1 leaderelection.go:336] error initially creating leader election record: Post "https://k3k-http-proxy-k3kcluster-service/apis/coordination.k8s.io/v1/namespaces/kube-system/leases": context canceled
{"level":"fatal","timestamp":"2025-06-04T13:14:53.369Z","logger":"k3k-kubelet","msg":"virtual manager stopped","error":"too many open files"}
```
This typically indicates a low limit on inotify watchers or file descriptors on the host system.
To increase the inotify limits, connect to the host nodes and run:
```sh
sudo sysctl -w fs.inotify.max_user_watches=2099999999
sudo sysctl -w fs.inotify.max_user_instances=2099999999
sudo sysctl -w fs.inotify.max_queued_events=2099999999
```
You can persist these settings by adding them to `/etc/sysctl.conf`:
```sh
fs.inotify.max_user_watches=2099999999
fs.inotify.max_user_instances=2099999999
fs.inotify.max_queued_events=2099999999
```
Apply the changes:
```sh
sudo sysctl -p
```
You can find more details in this [KB document](https://www.suse.com/support/kb/doc/?id=000020048).
---
## Inspect Controller Logs for Failure Diagnosis
To view the K3k controller logs when diagnosing a failed virtual cluster:
```sh
kubectl logs -n k3k-system -l app.kubernetes.io/name=k3k
```
This retrieves logs from K3k controller components.
---
## Inspect Cluster Logs for Failure Diagnosis
To view logs for a failed virtual cluster:
```sh
kubectl logs -n <cluster_namespace> -l cluster=<cluster_name>
```
This retrieves logs from the K3k cluster components (`agent`, `server`, and `virtual-kubelet`).
> 💡 You can also use `kubectl describe cluster <cluster_name>` to check for recent events and status conditions.
---
## Virtual Cluster Not Starting or Stuck in Pending
Some of the most common causes are related to missing prerequisites or wrong configuration.
### Storage class not available
When creating a Virtual Cluster with `dynamic` persistence, a PVC is needed. You can check whether the PVC was claimed but not bound with `kubectl get pvc -n <cluster_namespace>`. If you see a pending PVC, you probably don't have a default storage class defined, or you specified a wrong one.
#### Example with wrong storage class
The `pvc` is pending:
```bash
kubectl get pvc -n k3k-test-storage
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
varlibrancherk3s-k3k-test-storage-server-0 Pending not-available <unset> 4s
```
The `server` is pending:
```bash
kubectl get po -n k3k-test-storage
NAME READY STATUS RESTARTS AGE
k3k-test-storage-kubelet-j4zn5 1/1 Running 0 54s
k3k-test-storage-server-0 0/1 Pending 0 54s
```
To fix this, use a valid storage class. You can list the existing storage classes with:
```bash
kubectl get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 3d6h
```
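One way to apply the fix, assuming the `local-path` class from the listing above, is to reference it explicitly in the Cluster spec rather than relying on a host default (a sketch only; the cluster name and namespace are placeholders):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: test-storage             # placeholder
  namespace: k3k-test-storage    # placeholder
spec:
  persistence:
    type: dynamic
    storageClassName: local-path # must match an existing StorageClass on the host
```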
### Wrong node selector
When creating a Virtual Cluster with `defaultNodeSelector`, all pods will remain pending if the selector is not valid.
#### Example
The `server` is pending:
```bash
kubectl get po
NAME READY STATUS RESTARTS AGE
k3k-k3kcluster-node-placed-server-0 0/1 Pending 0 58s
```
The description of the pod provides the reason:
```bash
kubectl describe po k3k-k3kcluster-node-placed-server-0
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 84s default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
```
To fix this, use a node affinity/selector that matches labels on schedulable nodes, as in the sketch below.
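A hedged sketch of the Cluster-level `nodeSelector` field from the API reference, using the standard `kubernetes.io/os` node label (any label that actually exists on schedulable host nodes works):
```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-node-placed
spec:
  nodeSelector:
    kubernetes.io/os: linux      # must match labels on at least one host node
```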
### Image pull issues (airgapped setup)
When creating a Virtual Cluster in an air-gapped environment, the images must be available in the configured registry. Check for an `ImagePullBackOff` status when listing the pods in the virtual cluster namespace.
#### Example
The `server` is failing:
```bash
kubectl get po -n k3k-test-registry
NAME READY STATUS RESTARTS AGE
k3k-test-registry-kubelet-r4zh5 1/1 Running 0 54s
k3k-test-registry-server-0 0/1 ImagePullBackOff 0 54s
```
To fix this, make sure the failing image is available in the registry. You can describe the failing pod to get more details.


@@ -37,7 +37,7 @@ If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g
```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: my-default-policy
@@ -56,7 +56,7 @@ You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster`
**Example:** Allow only "shared" mode clusters.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: shared-only-policy
@@ -74,7 +74,7 @@ You can define resource consumption limits for bound Namespaces by specifying a
**Example:** Set CPU, memory, and pod limits.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: quota-policy
@@ -93,7 +93,7 @@ You can define default resource requests/limits and min/max constraints for cont
**Example:** Define default CPU requests/limits and min/max CPU.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: limit-policy
@@ -118,7 +118,7 @@ By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network
**Example:** Disable the default NetworkPolicy.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: no-default-netpol-policy
@@ -133,7 +133,7 @@ You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admiss
**Example:** Enforce the "baseline" PSS level.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: baseline-psa-policy


@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: example1
spec:
mode: "shared"
servers: 1
agents: 3
token: test
version: v1.26.0-k3s2
clusterCIDR: 10.30.0.0/16
serviceCIDR: 10.31.0.0/16
clusterDNS: 10.30.0.10
serverArgs:
- "--write-kubeconfig-mode=777"
expose:
ingress:
enabled: true
ingressClassName: "nginx"


@@ -0,0 +1,15 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: shared-multiple-servers
spec:
mode: shared
servers: 3
agents: 3
version: v1.33.1-k3s1
serverArgs:
- "--write-kubeconfig-mode=777"
tlsSANs:
- myserver.app
expose:
nodePort: {}


@@ -0,0 +1,14 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: shared-single-server
spec:
mode: shared
servers: 1
version: v1.33.1-k3s1
serverArgs:
- "--write-kubeconfig-mode=777"
tlsSANs:
- myserver.app
expose:
nodePort: {}


@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: single-server
spec:
mode: "shared"
servers: 1
agents: 3
token: test
version: v1.26.0-k3s2
clusterCIDR: 10.30.0.0/16
serviceCIDR: 10.31.0.0/16
clusterDNS: 10.30.0.10
serverArgs:
- "--write-kubeconfig-mode=777"
expose:
ingress:
enabled: true
ingressClassName: "nginx"


@@ -0,0 +1,13 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: virtual-server
spec:
mode: virtual
servers: 3
agents: 3
version: v1.33.1-k3s1
tlsSANs:
- myserver.app
expose:
nodePort: {}


@@ -1,9 +1,9 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: policy-example
# spec:
# disableNetworkPolicy: false
# allowedMode: "shared"
spec:
allowedMode: shared
disableNetworkPolicy: true
# podSecurityAdmissionLevel: "baseline"
# defaultPriorityClass: "lowpriority"

go.mod

@@ -11,16 +11,17 @@ replace (
)
require (
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
go.etcd.io/etcd/api/v3 v3.5.16
go.etcd.io/etcd/client/v3 v3.5.16
@@ -37,12 +38,11 @@ require (
k8s.io/component-helpers v0.31.4
k8s.io/kubectl v0.31.4
k8s.io/kubelet v0.31.4
k8s.io/kubernetes v1.31.4
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.19.4
)
require github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
@@ -55,6 +55,7 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -68,7 +69,7 @@ require (
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -85,17 +86,17 @@ require (
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -152,6 +153,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -162,13 +164,17 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -176,13 +182,12 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
@@ -202,15 +207,15 @@ require (
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
google.golang.org/grpc v1.67.3 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect

go.sum

@@ -82,9 +82,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -138,8 +137,8 @@ github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -167,6 +166,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -352,6 +353,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
@@ -385,6 +388,8 @@ github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmi
github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -399,12 +404,18 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -424,6 +435,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo=
github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4=
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0 h1:zEfdO1Dz7sA2jNpf1PVCOI6FND1t/mDpaeDCguaLRXw=
@@ -434,8 +447,6 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803 h1:0O149bxUoQL69b4+pcGaCbKk2bvA/43AhkczkDuRjMc=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803/go.mod h1:SHfH2bqArcMTBh/JejdbtsyZwmYYqkpJnABOyipjT54=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -452,8 +463,6 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -485,10 +494,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
@@ -638,19 +647,19 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -717,6 +726,8 @@ k8s.io/kubectl v0.31.4 h1:c8Af8xd1VjyoKyWMW0xHv2+tYxEjne8s6OOziMmaD10=
k8s.io/kubectl v0.31.4/go.mod h1:0E0rpXg40Q57wRE6LB9su+4tmwx1IzZrmIEvhQPk0i4=
k8s.io/kubelet v0.31.4 h1:6TokbMv+HnFG7Oe9tVS/J0VPGdC4GnsQZXuZoo7Ixi8=
k8s.io/kubelet v0.31.4/go.mod h1:8ZM5LZyANoVxUtmayUxD/nsl+6GjREo7kSanv8AoL4U=
k8s.io/kubernetes v1.31.4 h1:VQDX52gTQnq8C/jCo48AQuDsWbWMh9XXxhQRDYjgakw=
k8s.io/kubernetes v1.31.4/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=


@@ -2,73 +2,22 @@ package main
import (
"errors"
"os"
"gopkg.in/yaml.v2"
)
// config has all virtual-kubelet startup options
type config struct {
ClusterName string `yaml:"clusterName,omitempty"`
ClusterNamespace string `yaml:"clusterNamespace,omitempty"`
ServiceName string `yaml:"serviceName,omitempty"`
Token string `yaml:"token,omitempty"`
AgentHostname string `yaml:"agentHostname,omitempty"`
HostConfigPath string `yaml:"hostConfigPath,omitempty"`
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
KubeletPort string `yaml:"kubeletPort,omitempty"`
ServerIP string `yaml:"serverIP,omitempty"`
Version string `yaml:"version,omitempty"`
}
func (c *config) unmarshalYAML(data []byte) error {
var conf config
if err := yaml.Unmarshal(data, &conf); err != nil {
return err
}
if c.ClusterName == "" {
c.ClusterName = conf.ClusterName
}
if c.ClusterNamespace == "" {
c.ClusterNamespace = conf.ClusterNamespace
}
if c.HostConfigPath == "" {
c.HostConfigPath = conf.HostConfigPath
}
if c.VirtualConfigPath == "" {
c.VirtualConfigPath = conf.VirtualConfigPath
}
if c.KubeletPort == "" {
c.KubeletPort = conf.KubeletPort
}
if c.AgentHostname == "" {
c.AgentHostname = conf.AgentHostname
}
if c.ServiceName == "" {
c.ServiceName = conf.ServiceName
}
if c.Token == "" {
c.Token = conf.Token
}
if c.ServerIP == "" {
c.ServerIP = conf.ServerIP
}
if c.Version == "" {
c.Version = conf.Version
}
return nil
ClusterName string `mapstructure:"clusterName"`
ClusterNamespace string `mapstructure:"clusterNamespace"`
ServiceName string `mapstructure:"serviceName"`
Token string `mapstructure:"token"`
AgentHostname string `mapstructure:"agentHostname"`
HostKubeconfig string `mapstructure:"hostKubeconfig"`
VirtKubeconfig string `mapstructure:"virtKubeconfig"`
KubeletPort int `mapstructure:"kubeletPort"`
WebhookPort int `mapstructure:"webhookPort"`
ServerIP string `mapstructure:"serverIP"`
Version string `mapstructure:"version"`
MirrorHostNodes bool `mapstructure:"mirrorHostNodes"`
}
func (c *config) validate() error {
@@ -86,16 +35,3 @@ func (c *config) validate() error {
return nil
}
func (c *config) parse(path string) error {
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
b, err := os.ReadFile(path)
if err != nil {
return err
}
return c.unmarshalYAML(b)
}


@@ -1,195 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const ConfigMapSyncerName = "configmap-syncer"
type ConfigMapSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// CoreClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
// representation
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
func (c *ConfigMapSyncer) Name() string {
return ConfigMapSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !c.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := c.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.ConfigMap
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
err = c.HostClient.Create(ctx, translated)
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up on in the next re-enqueue
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels made on the host cluster by other applications
// but we do need to make sure the labels that the kubelet uses to track host cluster values
// are being tracked appropriately
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = c.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if c.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Insert(objKey)
c.mutex.Unlock()
_, err := c.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, no need to writelock/add it
if !c.isWatching(objKey) {
return nil
}
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return c.removeHostConfigMap(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove configmap: %w", err)
}
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Delete(objKey)
c.mutex.Unlock()
return nil
}
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
var vConfigMap corev1.ConfigMap
key := types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := c.TranslateFunc(&vConfigMap)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return c.HostClient.Delete(ctx, translated)
}


@@ -1,132 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/k3k-kubelet/translate"
k3klog "github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type ControllerHandler struct {
sync.RWMutex
// Mgr is the manager used to run new controllers - from the virtual cluster
Mgr manager.Manager
// Scheme is the scheme used to run new controllers - from the virtual cluster
Scheme runtime.Scheme
// HostClient is the client used to communicate with the host cluster
HostClient client.Client
// VirtualClient is the client used to communicate with the virtual cluster
VirtualClient client.Client
// Translator is the translator that will be used to adjust objects before they
// are made on the host cluster
Translator translate.ToHostTranslator
// Logger is the logger that the controller will use to log errors
Logger *k3klog.Logger
// controllers are the controllers which are currently running
controllers map[schema.GroupVersionKind]updateableReconciler
}
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
// be altered through the Add and Remove methods
type updateableReconciler interface {
reconcile.Reconciler
Name() string
AddResource(ctx context.Context, namespace string, name string) error
RemoveResource(ctx context.Context, namespace string, name string) error
}
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
c.RLock()
controllers := c.controllers
if controllers != nil {
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
c.RUnlock()
return err
}
}
// we need to manually lock/unlock since we intend to write-lock when adding a new controller
c.RUnlock()
var r updateableReconciler
switch obj.(type) {
case *v1.Secret:
r = &SecretSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
// note that this doesn't do any type safety - fix this
// when generics work
c.Translator.TranslateTo(s)
// Remove service-account-token types when synced to the host
if s.Type == v1.SecretTypeServiceAccountToken {
s.Type = v1.SecretTypeOpaque
}
return s, nil
},
Logger: c.Logger,
}
case *v1.ConfigMap:
r = &ConfigMapSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
c.Translator.TranslateTo(s)
return s, nil
},
Logger: c.Logger,
}
default:
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
// logic could be used for other types.
return fmt.Errorf("unrecognized type: %T", obj)
}
err := ctrl.NewControllerManagedBy(c.Mgr).
Named(r.Name()).
For(&v1.ConfigMap{}).
Complete(r)
if err != nil {
return fmt.Errorf("unable to start configmap controller: %w", err)
}
c.Lock()
if c.controllers == nil {
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
}
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
c.Unlock()
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
}
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
// we only need a read lock here since we aren't adding a new controller
c.RLock()
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()
if !ok {
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
}
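A minimal sketch (not part of this changeset) of how the removed ControllerHandler was driven: the caller hands it a typed object with its GVK populated, and the handler lazily starts the matching syncer the first time that kind is seen. The helper name and the import path of the old kubelet controller package are assumptions inferred from this diff.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// import path assumed from the package layout shown in this diff
	kubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
)

// syncVirtualSecret is a hypothetical helper: AddResource keys its running
// controllers by GroupVersionKind, so the TypeMeta must be set before the
// call; the first Secret seen lazily starts the SecretSyncer controller.
func syncVirtualSecret(ctx context.Context, handler *kubeletcontroller.ControllerHandler, namespace, name string) error {
	secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
	secret.GetObjectKind().SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret"))
	return handler.AddResource(ctx, secret)
}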

View File

@@ -1,118 +0,0 @@
package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
pvcController = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
)
type PVCReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PVCReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(pvcController).
For(&v1.PersistentVolumeClaim{}).
Complete(&reconciler)
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC if it exists
if err := r.hostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced PVC
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create the pvc on host
log.Info("creating the persistent volume for the first time on the host cluster")
// note that we dont need to update the PVC on the host cluster, only syncing the PVC to allow being
// handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translator.TranslateTo(hostPVC)
return hostPVC
}

View File

@@ -1,192 +0,0 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const SecretSyncerName = "secret-syncer"
type SecretSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// HostClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to its
// host representation
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
func (s *SecretSyncer) Name() string {
return SecretSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !s.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := s.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.Secret
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
err = s.HostClient.Create(ctx, translated)
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
if err != nil {
err = fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, err
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels set on the host cluster by other applications,
// but we do need to make sure the labels that the kubelet uses to track host cluster
// values are kept up to date
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = s.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, there is no need to write-lock and add it
if s.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Insert(objKey)
s.mutex.Unlock()
_, err := s.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, there is nothing to remove
if !s.isWatching(objKey) {
return nil
}
// remove the synced secret from the host cluster, retrying on transient errors
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return s.removeHostSecret(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove secret: %w", err)
}
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Delete(objKey)
s.mutex.Unlock()
return nil
}
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
var vSecret corev1.Secret
err := s.VirtualClient.Get(ctx, types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}, &vSecret)
if err != nil {
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := s.TranslateFunc(&vSecret)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return s.HostClient.Delete(ctx, translated)
}
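As a side note, a sketch of the retry pattern that RemoveResource relies on, assuming client-go's retry.DefaultBackoff in place of k3k's controller.Backoff (whose values aren't shown in this diff); the helper name is made up.

package example

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteWithRetry mirrors the retry.OnError usage in RemoveResource: every
// error is treated as retryable and the delete is re-attempted with
// exponential backoff. retry.DefaultBackoff stands in for controller.Backoff.
func deleteWithRetry(ctx context.Context, c client.Client, obj client.Object) error {
	retriable := func(err error) bool { return err != nil }
	if err := retry.OnError(retry.DefaultBackoff, retriable, func() error {
		// treat an already-deleted object as success
		return client.IgnoreNotFound(c.Delete(ctx, obj))
	}); err != nil {
		return fmt.Errorf("unable to delete %s/%s: %w", obj.GetNamespace(), obj.GetName(), err)
	}
	return nil
}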

View File

@@ -0,0 +1,150 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
configMapControllerName = "configmap-syncer"
configMapFinalizerName = "configmap.k3k.io/finalizer"
)
type ConfigMapSyncer struct {
// SyncerContext contains all client information for host and virtual cluster
*SyncerContext
}
func (c *ConfigMapSyncer) Name() string {
return configMapControllerName
}
// AddConfigMapSyncer adds configmap syncer controller to the manager of the virtual cluster
func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := ConfigMapSyncer{
SyncerContext: &SyncerContext{
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, configMapControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&corev1.ConfigMap{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for configMap Sync Config
syncConfig := cluster.Spec.Sync.ConfigMaps
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterName)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1beta1.Cluster
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
var virtualConfigMap corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtualConfigMap); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
// handle deletion
if !virtualConfigMap.DeletionTimestamp.IsZero() {
// delete the synced ConfigMap if it exists
if err := c.HostClient.Delete(ctx, syncedConfigMap); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced configMap
if controllerutil.RemoveFinalizer(&virtualConfigMap, configMapFinalizerName) {
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtualConfigMap, configMapFinalizerName) {
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
return reconcile.Result{}, err
}
}
var hostConfigMap corev1.ConfigMap
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: syncedConfigMap.Name, Namespace: syncedConfigMap.Namespace}, &hostConfigMap); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the ConfigMap for the first time on the host cluster")
return reconcile.Result{}, c.HostClient.Create(ctx, syncedConfigMap)
}
return reconcile.Result{}, err
}
// TODO: Add option to keep labels/annotation set by the host cluster
log.Info("updating ConfigMap on the host cluster")
return reconcile.Result{}, c.HostClient.Update(ctx, syncedConfigMap)
}
// translateConfigMap translates a ConfigMap created in the virtual cluster into its
// host cluster representation
func (c *ConfigMapSyncer) translateConfigMap(configMap *corev1.ConfigMap) *corev1.ConfigMap {
hostConfigMap := configMap.DeepCopy()
c.Translator.TranslateTo(hostConfigMap)
return hostConfigMap
}
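A small, self-contained sketch of the selector logic that filterResources applies (the helper name is made up): an empty selector set matches everything, otherwise the object's labels must satisfy the configured selector.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// matchesSyncSelector reproduces the selector check from filterResources:
// an empty selector matches every object, otherwise the object's labels must
// contain every key/value pair of the configured selector.
func matchesSyncSelector(selector, objectLabels map[string]string) bool {
	labelSelector := labels.SelectorFromSet(selector)
	if labelSelector.Empty() {
		return true
	}
	return labelSelector.Matches(labels.Set(objectLabels))
}

func main() {
	fmt.Println(matchesSyncSelector(nil, map[string]string{"foo": "bar"}))                              // true: empty selector matches all
	fmt.Println(matchesSyncSelector(map[string]string{"sync": "yes"}, map[string]string{"foo": "bar"})) // false: selector not satisfied
	fmt.Println(matchesSyncSelector(map[string]string{"foo": "bar"}, map[string]string{"foo": "bar"}))  // true
}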

View File

@@ -0,0 +1,236 @@
package syncer_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var ConfigMapTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddConfigMapSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a ConfigMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
Expect(hostConfigMap.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostConfigMap.Labels)
})
It("updates a ConfigMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
Expect(hostConfigMap.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(configMap)
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
configMap.Labels = map[string]string{"foo": "bar"}
// update virtual configmap
err = virtTestEnv.k8sClient.Update(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
Expect(configMap.Labels).To(ContainElement("bar"))
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
// check hostConfigMap
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
Expect(err).NotTo(HaveOccurred())
return hostConfigMap.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a configMap on the host cluster", func() {
ctx := context.Background()
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err := virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
err = virtTestEnv.k8sClient.Delete(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not sync a configMap if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.ConfigMaps.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-",
Namespace: "default",
},
Data: map[string]string{
"foo": "bar",
},
}
err = virtTestEnv.k8sClient.Create(ctx, configMap)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
GinkgoWriter.Printf("error: %v", err)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,161 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
ingressControllerName = "ingress-syncer-controller"
ingressFinalizerName = "ingress.k3k.io/finalizer"
)
type IngressReconciler struct {
*SyncerContext
}
// AddIngressSyncer adds ingress syncer controller to the manager of the virtual cluster
func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := IngressReconciler{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, ingressControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&networkingv1.Ingress{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for ingressConfig
syncConfig := cluster.Spec.Sync.Ingresses
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
log.Info("reconciling ingress object")
var (
virtIngress networkingv1.Ingress
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedIngress := r.ingress(&virtIngress)
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtIngress.DeletionTimestamp.IsZero() {
// delete the synced Ingress if it exists
if err := r.HostClient.Delete(ctx, syncedIngress); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// remove the finalizer after cleaning up the synced Ingress
if controllerutil.RemoveFinalizer(&virtIngress, ingressFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtIngress, ingressFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
return reconcile.Result{}, err
}
}
// create or update the ingress on host
var hostIngress networkingv1.Ingress
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedIngress.Name, Namespace: r.ClusterNamespace}, &hostIngress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the ingress for the first time on the host cluster")
return reconcile.Result{}, r.HostClient.Create(ctx, syncedIngress)
}
return reconcile.Result{}, err
}
log.Info("updating ingress on the host cluster")
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
}
func (r *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
hostIngress := obj.DeepCopy()
r.Translator.TranslateTo(hostIngress)
for _, rule := range hostIngress.Spec.Rules {
// modify services in rules to point to the synced services
if rule.HTTP != nil {
for _, path := range rule.HTTP.Paths {
if path.Backend.Service != nil {
path.Backend.Service.Name = r.Translator.TranslateName(obj.GetNamespace(), path.Backend.Service.Name)
}
}
}
}
// don't sync finalizers to the host
return hostIngress
}
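To isolate the backend rewrite above, a sketch under the assumption that translate.ToHostTranslator exposes TranslateName(namespace, name) as used elsewhere in this diff; the helper name is made up.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

// rewriteBackends shows, in isolation, the rewrite the ingress method performs:
// every HTTP path backend service is renamed to its host-side (translated) name
// so the synced Ingress points at the synced Services.
// Assumes TranslateName(namespace, name string) string, as used in this diff.
func rewriteBackends(t translate.ToHostTranslator, virtualNamespace string, ing *networkingv1.Ingress) {
	for _, rule := range ing.Spec.Rules {
		if rule.HTTP == nil {
			continue
		}
		for i := range rule.HTTP.Paths {
			if svc := rule.HTTP.Paths[i].Backend.Service; svc != nil {
				svc.Name = t.TranslateName(virtualNamespace, svc.Name)
			}
		}
	}
}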

View File

@@ -0,0 +1,349 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var IngressTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Ingresses: v1beta1.IngressSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
GinkgoWriter.Printf("labels: %v\n", hostIngress.Labels)
})
It("updates a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
key := client.ObjectKeyFromObject(ingress)
err = virtTestEnv.k8sClient.Get(ctx, key, ingress)
Expect(err).NotTo(HaveOccurred())
ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "test-service-updated"
// update virtual ingress
err = virtTestEnv.k8sClient.Update(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
// check hostIngress
Eventually(func() string {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
Expect(err).NotTo(HaveOccurred())
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(Equal(translateName(cluster, ingress.Namespace, "test-service-updated")))
})
It("deletes a Ingress on the host cluster", func() {
ctx := context.Background()
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
err = virtTestEnv.k8sClient.Delete(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not sync an Ingress if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Ingresses.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,138 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
pvcControllerName = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
)
type PVCReconciler struct {
*SyncerContext
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := PVCReconciler{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, pvcControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&v1.PersistentVolumeClaim{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for pvc config
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC if it exists
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced pvc
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create the pvc on host
log.Info("creating the persistent volume claim for the first time on the host cluster")
// note that we don't need to update the PVC on the host cluster; we only sync it so that it can
// be handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translator.TranslateTo(hostPVC)
return hostPVC
}
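A one-line sketch of the create-once semantics used for PVCs (the helper name is made up): the claim is created on the host and never updated afterwards, and an AlreadyExists error is swallowed so repeated reconciles stay idempotent.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// createOnce mirrors the PVC path above: create on the host, never update,
// and treat AlreadyExists as success.
func createOnce(ctx context.Context, hostClient ctrlruntimeclient.Client, pvc *corev1.PersistentVolumeClaim) error {
	return ctrlruntimeclient.IgnoreAlreadyExists(hostClient.Create(ctx, pvc))
}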

View File

@@ -0,0 +1,104 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var PVCTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a pvc on the host cluster", func() {
ctx := context.Background()
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: ptr.To("test-sc"),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadOnlyMany,
},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse("1G"),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, pvc)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
var hostPVC v1.PersistentVolumeClaim
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPVCName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName))
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
})
}

View File

@@ -1,76 +1,85 @@
package controller
package syncer
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
podController = "pod-pvc-controller"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
podControllerName = "pod-pvc-controller"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
)
type PodReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
*SyncerContext
}
// AddPodPVCController adds pod controller to k3k-kubelet
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PodReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(podController).
Named(name).
For(&v1.Pod{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for pvc config
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
// If PVC syncing is disabled, only process deletions to allow for cleanup.
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
}
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPod v1.Pod
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -101,14 +110,19 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
Namespace: pod.Namespace,
}
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
pv := r.pseudoPV(&pvc)
if pod.DeletionTimestamp != nil {
return r.handlePodDeletion(ctx, pv)
}
log.Info("Creating pseudo Persistent Volume")
pv := r.pseudoPV(&pvc)
if err := r.virtualClient.Create(ctx, pv); err != nil {
if err := r.VirtualClient.Create(ctx, pv); err != nil {
return ctrlruntimeclient.IgnoreAlreadyExists(err)
}
@@ -117,7 +131,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
Phase: v1.VolumeBound,
}
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
return err
}
@@ -133,7 +147,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
pvcPatch.Status.Phase = v1.ClaimBound
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
return r.virtualClient.Status().Update(ctx, pvcPatch)
return r.VirtualClient.Status().Update(ctx, pvcPatch)
}
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
@@ -180,3 +194,22 @@ func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVo
},
}
}
func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
var currentPV v1.PersistentVolume
if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), &currentPV); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
pvPatch := currentPV.DeepCopy()
pvPatch.Spec.ClaimRef = nil
pvPatch.Status.Phase = v1.VolumeReleased
controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
return err
}
return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, &currentPV))
}

View File

@@ -0,0 +1,256 @@
package syncer_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var PriorityClassTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PriorityClasses: v1beta1.PriorityClassSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddPriorityClassSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
Labels: map[string]string{
"foo": "bar",
},
},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostPriorityClass.Labels)
})
It("updates a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(priorityClass)
err = virtTestEnv.k8sClient.Get(ctx, key, priorityClass)
Expect(err).NotTo(HaveOccurred())
priorityClass.Labels = map[string]string{"foo": "bar"}
// update virtual priorityClass
err = virtTestEnv.k8sClient.Update(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
Expect(priorityClass.Labels).To(ContainElement("bar"))
// check hostPriorityClass
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
Expect(err).NotTo(HaveOccurred())
return hostPriorityClass.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
err = virtTestEnv.k8sClient.Delete(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("creates a priorityClass on the host cluster with the globalDefault annotation", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
GlobalDefault: true,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster without the GlobalDefault value", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
Expect(hostPriorityClass.Annotations[syncer.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
})
It("will not create a priorityClass on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.PriorityClasses.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err = virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,174 @@
package syncer
import (
"context"
"strings"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
PriorityClassGlobalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"
priorityClassControllerName = "priorityclass-syncer-controller"
priorityClassFinalizerName = "priorityclass.k3k.io/finalizer"
)
type PriorityClassSyncer struct {
*SyncerContext
}
// AddPriorityClassSyncer adds a PriorityClass reconciler to k3k-kubelet
func AddPriorityClassSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
// initialize a new Reconciler
reconciler := PriorityClassSyncer{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, priorityClassControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&schedulingv1.PriorityClass{}).WithEventFilter(ignoreSystemPrefixPredicate).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
// IgnoreSystemPrefixPredicate filters out resources whose names start with "system-".
var ignoreSystemPrefixPredicate = predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return !strings.HasPrefix(e.ObjectOld.GetName(), "system-")
},
CreateFunc: func(e event.CreateEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
DeleteFunc: func(e event.DeleteEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
GenericFunc: func(e event.GenericEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
}
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for priorityClassConfig
syncConfig := cluster.Spec.Sync.PriorityClasses
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
priorityClass schedulingv1.PriorityClass
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
hostPriorityClass := r.translatePriorityClass(priorityClass)
// handle deletion
if !priorityClass.DeletionTimestamp.IsZero() {
// delete the synced priorityClass if it exists
// TODO add test for previous implementation without err != nil check, and also check the other controllers
if err := r.HostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced priorityClass
if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
// create the priorityClass on the host
log.Info("creating the priorityClass for the first time on the host cluster")
err := r.HostClient.Create(ctx, hostPriorityClass)
if err != nil {
if !apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, err
}
return reconcile.Result{}, r.HostClient.Update(ctx, hostPriorityClass)
}
return reconcile.Result{}, nil
}
func (r *PriorityClassSyncer) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
hostPriorityClass := priorityClass.DeepCopy()
r.Translator.TranslateTo(hostPriorityClass)
if hostPriorityClass.Annotations == nil {
hostPriorityClass.Annotations = make(map[string]string)
}
if hostPriorityClass.GlobalDefault {
hostPriorityClass.GlobalDefault = false
hostPriorityClass.Annotations[PriorityClassGlobalDefaultAnnotation] = "true"
}
return hostPriorityClass
}
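
A minimal sketch, not part of this PR, showing the effect of the GlobalDefault handling above: a PriorityClass that is a global default in the virtual cluster is mirrored to the host with GlobalDefault forced to false and the original intent preserved in the PriorityClassGlobalDefaultAnnotation. Name translation is omitted here, and the translate helper is illustrative only.

package main

import (
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Mirrors the annotation constant defined in the syncer above.
const PriorityClassGlobalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"

// translate reproduces only the GlobalDefault step of translatePriorityClass:
// the host copy is never a global default, but the intent is kept as an annotation.
func translate(pc schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
	host := pc.DeepCopy()
	if host.Annotations == nil {
		host.Annotations = map[string]string{}
	}
	if host.GlobalDefault {
		host.GlobalDefault = false
		host.Annotations[PriorityClassGlobalDefaultAnnotation] = "true"
	}
	return host
}

func main() {
	virt := schedulingv1.PriorityClass{
		ObjectMeta:    metav1.ObjectMeta{Name: "high-priority"},
		Value:         1000,
		GlobalDefault: true,
	}

	host := translate(virt)
	fmt.Println(host.GlobalDefault)                                     // false
	fmt.Println(host.Annotations[PriorityClassGlobalDefaultAnnotation]) // "true"
}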

View File

@@ -0,0 +1,155 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
secretControllerName = "secret-syncer"
secretFinalizerName = "secret.k3k.io/finalizer"
)
type SecretSyncer struct {
// SyncerContext contains all client information for host and virtual cluster
*SyncerContext
}
func (s *SecretSyncer) Name() string {
return secretControllerName
}
// AddSecretSyncer adds secret syncer controller to the manager of the virtual cluster
func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
reconciler := SecretSyncer{
SyncerContext: &SyncerContext{
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, secretControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&v1.Secret{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *SecretSyncer) filterResources(object client.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for Secrets Sync Config
syncConfig := cluster.Spec.Sync.Secrets
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
// Reconcile implements reconcile.Reconciler and synchronizes Secrets from the virtual cluster to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterName)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1beta1.Cluster
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
var virtualSecret v1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtualSecret); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
syncedSecret := s.translateSecret(&virtualSecret)
// handle deletion
if !virtualSecret.DeletionTimestamp.IsZero() {
// delete the synced secret if it exists
if err := s.HostClient.Delete(ctx, syncedSecret); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced secret
if controllerutil.RemoveFinalizer(&virtualSecret, secretFinalizerName) {
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtualSecret, secretFinalizerName) {
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
return reconcile.Result{}, err
}
}
var hostSecret v1.Secret
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: syncedSecret.Name, Namespace: syncedSecret.Namespace}, &hostSecret); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the Secret for the first time on the host cluster")
return reconcile.Result{}, s.HostClient.Create(ctx, syncedSecret)
}
return reconcile.Result{}, err
}
// TODO: Add option to keep labels/annotation set by the host cluster
log.Info("updating Secret on the host cluster")
return reconcile.Result{}, s.HostClient.Update(ctx, syncedSecret)
}
// translateSecret translates a given Secret created in the virtual cluster
// into its host cluster counterpart
func (s *SecretSyncer) translateSecret(secret *v1.Secret) *v1.Secret {
hostSecret := secret.DeepCopy()
if hostSecret.Type == v1.SecretTypeServiceAccountToken {
hostSecret.Type = v1.SecretTypeOpaque
}
s.Translator.TranslateTo(hostSecret)
return hostSecret
}
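
The filterResources methods above (for PriorityClasses, Secrets, and Services alike) share the same selector semantics: an empty Selector in the sync config matches every object, a non-empty one restricts syncing to objects carrying those labels, and with syncing disabled only deletions pass through for cleanup. A small standalone sketch of the selector behaviour, with hypothetical label values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Labels on a hypothetical virtual-cluster object.
	objLabels := labels.Set{"app": "web", "team": "payments"}

	// An empty selector set matches everything, so every object is synced.
	empty := labels.SelectorFromSet(nil)
	fmt.Println(empty.Empty(), empty.Matches(objLabels)) // true true

	// A configured selector only lets matching objects through.
	scoped := labels.SelectorFromSet(labels.Set{"team": "payments"})
	fmt.Println(scoped.Matches(objLabels)) // true

	other := labels.SelectorFromSet(labels.Set{"team": "search"})
	fmt.Println(other.Matches(objLabels)) // false, the syncer skips this object
}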

View File

@@ -0,0 +1,233 @@
package syncer_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var SecretTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a Secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
Expect(hostSecret.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostSecret.Labels)
})
It("updates a Secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
Expect(hostSecret.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(secret)
err = virtTestEnv.k8sClient.Get(ctx, key, secret)
Expect(err).NotTo(HaveOccurred())
secret.Labels = map[string]string{"foo": "bar"}
// update virtual secret
err = virtTestEnv.k8sClient.Update(ctx, secret)
Expect(err).NotTo(HaveOccurred())
Expect(secret.Labels).To(ContainElement("bar"))
// check hostSecret
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
Expect(err).NotTo(HaveOccurred())
return hostSecret.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a secret on the host cluster", func() {
ctx := context.Background()
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
Expect(hostSecret.Data).To(Equal(secret.Data))
err = virtTestEnv.k8sClient.Delete(ctx, secret)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not create a secret on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Secrets.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "secret-",
Namespace: "default",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
}
err = virtTestEnv.k8sClient.Create(ctx, secret)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -1,35 +1,31 @@
package controller
package syncer
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
serviceSyncerController = "service-syncer-controller"
serviceFinalizerName = "service.k3k.io/finalizer"
serviceControllerName = "service-syncer-controller"
serviceFinalizerName = "service.k3k.io/finalizer"
)
type ServiceReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
*SyncerContext
}
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
@@ -40,24 +36,25 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
}
reconciler := ServiceReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translator,
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, serviceControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(serviceSyncerController).
For(&v1.Service{}).
Named(name).
For(&v1.Service{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
@@ -66,34 +63,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtService.DeletionTimestamp.IsZero() {
// deleting the synced service if exists
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
if err := r.HostClient.Delete(ctx, syncedService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// remove the finalizer after cleaning up the synced service
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
if controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
@@ -102,20 +97,18 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// Add finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
if controllerutil.AddFinalizer(&virtService, serviceFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
// create or update the service on host
var hostService v1.Service
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.ClusterNamespace}, &hostService); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
return reconcile.Result{}, r.HostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
@@ -123,7 +116,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("updating service on the host cluster")
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
return reconcile.Result{}, r.HostClient.Update(ctx, syncedService)
}
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for serviceSyncConfig
syncConfig := cluster.Spec.Sync.Services
// If syncing is disabled, only process deletions to allow for cleanup.
if !syncConfig.Enabled {
return object.GetDeletionTimestamp() != nil
}
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
if labelSelector.Empty() {
return true
}
return labelSelector.Matches(labels.Set(object.GetLabels()))
}
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {

View File

@@ -0,0 +1,269 @@
package syncer_test
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var ServiceTests = func() {
var (
namespace string
cluster v1beta1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddServiceSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
GinkgoWriter.Printf("labels: %v\n", hostService.Labels)
})
It("updates a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
key := client.ObjectKeyFromObject(service)
err = virtTestEnv.k8sClient.Get(ctx, key, service)
Expect(err).NotTo(HaveOccurred())
service.Spec.Ports[0].Name = "test-port-updated"
// update virtual service
err = virtTestEnv.k8sClient.Update(ctx, service)
Expect(err).NotTo(HaveOccurred())
// check hostService
Eventually(func() string {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
Expect(err).NotTo(HaveOccurred())
return hostService.Spec.Ports[0].Name
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(Equal("test-port-updated"))
})
It("deletes a service on the host cluster", func() {
ctx := context.Background()
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err := virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created service %s in host cluster", hostServiceName))
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
err = virtTestEnv.k8sClient.Delete(ctx, service)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("will not create a service on the host cluster if disabled", func() {
ctx := context.Background()
cluster.Spec.Sync.Services.Enabled = false
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "service-",
Namespace: "default",
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{
Name: "test-port",
Port: 8888,
TargetPort: intstr.FromInt32(8888),
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, service)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
}

View File

@@ -0,0 +1,15 @@
package syncer
import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
type SyncerContext struct {
ClusterName string
ClusterNamespace string
VirtualClient client.Client
HostClient client.Client
Translator translate.ToHostTranslator
}
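
SyncerContext is the shared state each syncer above embeds: cluster identity, the virtual and host clients, and the name translator. A hedged sketch of how a hypothetical additional syncer would compose it; the exampleSyncer type and its method body are illustrative only, not part of the PR.

package syncer

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// exampleSyncer is a hypothetical resource syncer shown only for illustration.
type exampleSyncer struct {
	// Embedding SyncerContext provides ClusterName, ClusterNamespace, the
	// virtual and host clients, and the Translator, as in the syncers above.
	*SyncerContext
}

func (e *exampleSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// A real syncer would fetch the Cluster via e.HostClient, translate the
	// virtual object with e.Translator, and mirror it to the host here.
	return reconcile.Result{}, nil
}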

View File

@@ -0,0 +1,184 @@
package syncer_test
import (
"context"
"errors"
"os"
"path"
"path/filepath"
"testing"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Cluster Controller Suite")
}
type TestEnv struct {
*envtest.Environment
k8s *kubernetes.Clientset
k8sClient client.Client
}
var (
hostTestEnv *TestEnv
hostManager ctrl.Manager
virtTestEnv *TestEnv
virtManager ctrl.Manager
)
var _ = BeforeSuite(func() {
hostTestEnv = NewTestEnv()
By("HOST testEnv running at :" + hostTestEnv.ControlPlane.APIServer.Port)
virtTestEnv = NewTestEnv()
By("VIRT testEnv running at :" + virtTestEnv.ControlPlane.APIServer.Port)
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
ctrl.SetupSignalHandler()
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := hostTestEnv.Stop()
Expect(err).NotTo(HaveOccurred())
err = virtTestEnv.Stop()
Expect(err).NotTo(HaveOccurred())
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
err = os.RemoveAll(tmpKubebuilderDir)
Expect(err).NotTo(HaveOccurred())
})
func NewTestEnv() *TestEnv {
GinkgoHelper()
binaryAssetsDirectory := os.Getenv("KUBEBUILDER_ASSETS")
if binaryAssetsDirectory == "" {
binaryAssetsDirectory = "/usr/local/kubebuilder/bin"
}
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
if err := os.Mkdir(tmpKubebuilderDir, 0o755); !errors.Is(err, os.ErrExist) {
Expect(err).NotTo(HaveOccurred())
}
tempDir, err := os.MkdirTemp(tmpKubebuilderDir, "envtest-*")
Expect(err).NotTo(HaveOccurred())
err = os.CopyFS(tempDir, os.DirFS(binaryAssetsDirectory))
Expect(err).NotTo(HaveOccurred())
By("bootstrapping test environment")
testEnv := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
BinaryAssetsDirectory: tempDir,
Scheme: buildScheme(),
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
k8s, err := kubernetes.NewForConfig(cfg)
Expect(err).NotTo(HaveOccurred())
k8sClient, err := client.New(cfg, client.Options{Scheme: testEnv.Scheme})
Expect(err).NotTo(HaveOccurred())
return &TestEnv{
Environment: testEnv,
k8s: k8s,
k8sClient: k8sClient,
}
}
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
var _ = Describe("Kubelet Controller", func() {
var (
ctx context.Context
cancel context.CancelFunc
)
BeforeEach(func() {
var err error
ctx, cancel = context.WithCancel(context.Background())
hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{
// disable the metrics server
Metrics: metricsserver.Options{BindAddress: "0"},
Scheme: hostTestEnv.Scheme,
})
Expect(err).NotTo(HaveOccurred())
virtManager, err = ctrl.NewManager(virtTestEnv.Config, ctrl.Options{
// disable the metrics server
Metrics: metricsserver.Options{BindAddress: "0"},
Scheme: virtTestEnv.Scheme,
})
Expect(err).NotTo(HaveOccurred())
go func() {
defer GinkgoRecover()
err := hostManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
}()
go func() {
defer GinkgoRecover()
err := virtManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
}()
})
AfterEach(func() {
cancel()
})
Describe("PriorityClass Syncer", PriorityClassTests)
Describe("ConfigMap Syncer", ConfigMapTests)
Describe("Secret Syncer", SecretTests)
Describe("Service Syncer", ServiceTests)
Describe("Ingress Syncer", IngressTests)
Describe("PersistentVolumeClaim Syncer", PVCTests)
})
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
return translator.TranslateName(namespace, name)
}

View File

@@ -7,24 +7,25 @@ import (
"strconv"
"strings"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/manager"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
const (
webhookName = "podmutator.k3k.io"
webhookName = "podmutating.k3k.io"
webhookTimeout = int32(10)
webhookPort = "9443"
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
)
@@ -35,13 +36,14 @@ type webhookHandler struct {
serviceName string
clusterName string
clusterNamespace string
logger *log.Logger
logger logr.Logger
webhookPort int
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any status fields of the downward apis env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -49,9 +51,10 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
serviceName: serviceName,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
webhookPort: webhookPort,
}
// create mutator webhook configuration to the cluster
// create mutating webhook configuration to the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
@@ -72,7 +75,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
@@ -97,11 +100,9 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
w.logger.Info("extracting webhook tls from host cluster")
var (
webhookTLSSecret v1.Secret
)
var webhookTLSSecret v1.Secret
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
return nil, err
@@ -112,7 +113,7 @@ func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlrunti
return nil, errors.New("webhook CABundle does not exist in secret")
}
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{

View File

@@ -8,53 +8,53 @@ import (
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/go-logr/zapr"
certutil "github.com/rancher/dynamiclistener/cert"
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/go-logr/logr"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/log/klogv2"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/cache"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
certutil "github.com/rancher/dynamiclistener/cert"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
var (
baseScheme = runtime.NewScheme()
k3kKubeletName = "k3k-kubelet"
)
var baseScheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(baseScheme)
_ = v1alpha1.AddToScheme(baseScheme)
_ = v1beta1.AddToScheme(baseScheme)
}
type kubelet struct {
virtualCluster v1alpha1.Cluster
virtualCluster v1beta1.Cluster
name string
port int
@@ -67,12 +67,12 @@ type kubelet struct {
hostMgr manager.Manager
virtualMgr manager.Manager
node *nodeutil.Node
logger *k3klog.Logger
logger logr.Logger
token string
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
}
@@ -84,7 +84,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
if err != nil {
return nil, err
}
@@ -94,7 +94,15 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
ctrl.SetLogger(logger)
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
if c.MirrorHostNodes {
hostMetricsBindAddress = "0"
virtualMetricsBindAddress = "0"
}
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
Scheme: baseScheme,
@@ -102,7 +110,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
LeaderElectionNamespace: c.ClusterNamespace,
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8083",
BindAddress: hostMetricsBindAddress,
},
Cache: cache.Options{
DefaultNamespaces: map[string]cache.Config{
@@ -122,6 +130,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
webhookServer := webhook.NewServer(webhook.Options{
CertDir: "/opt/rancher/k3k-webhook",
Port: c.WebhookPort,
})
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
@@ -131,36 +140,21 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
LeaderElectionNamespace: "kube-system",
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8084",
BindAddress: virtualMetricsBindAddress,
},
})
if err != nil {
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
logger.Info("adding pod mutating webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
return nil, errors.New("failed to add controller: " + err.Error())
}
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
@@ -176,7 +170,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
}
var virtualCluster v1alpha1.Cluster
var virtualCluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
}
@@ -192,9 +186,10 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
hostMgr: hostMgr,
virtualMgr: virtualMgr,
agentIP: clusterIP,
logger: logger.Named(k3kKubeletName),
logger: logger,
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
port: c.KubeletPort,
}, nil
}
@@ -213,9 +208,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
func (k *kubelet) registerNode(agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
@@ -233,55 +228,57 @@ func (k *kubelet) start(ctx context.Context) {
go func() {
err := k.hostMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("host manager stopped", zap.Error(err))
k.logger.Error(err, "host manager stopped")
}
}()
go func() {
err := k.virtualMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
k.logger.Error(err, "virtual manager stopped")
}
}()
// run the node async so that we can wait for it to be ready in another call
go func() {
ctx = log.WithLogger(ctx, k.logger)
klog.SetLogger(k.logger)
ctx = log.WithLogger(ctx, klogv2.New(nil))
if err := k.node.Run(ctx); err != nil {
k.logger.Fatalw("node errored when running", zap.Error(err))
k.logger.Error(err, "node errored when running")
}
}()
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
k.logger.Error(err, "node was not ready within timeout of 1 minute")
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
k.logger.Error(err, "node stopped with an error")
}
k.logger.Info("node exited successfully")
}
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
if err != nil {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)
return utilProvider, &provider.Node{}, nil
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
mux := http.NewServeMux()
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
@@ -290,7 +287,7 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostna
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
@@ -301,12 +298,12 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostna
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
@@ -320,7 +317,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Infow("decoded bootstrap", zap.Error(err))
logger.Error(err, "decoded bootstrap")
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -334,7 +331,6 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
b.ClientCA.Content,
b.ClientCAKey.Content,
)
if err != nil {
return nil, err
}
@@ -372,17 +368,10 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
return clientcmd.Write(*config)
}
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var (
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
@@ -393,12 +382,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
// POD IP
podIP := net.ParseIP(os.Getenv("POD_IP"))
ip := net.ParseIP(agentIP)
altNames := certutil.AltNames{
DNSNames: []string{hostname},
IPs: []net.IP{ip},
IPs: []net.IP{ip, podIP},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
@@ -429,3 +419,56 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
Certificates: []tls.Certificate{clientCert},
}, nil
}
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
var cluster v1beta1.Cluster
objKey := types.NamespacedName{
Namespace: c.ClusterNamespace,
Name: c.ClusterName,
}
if err := hostClient.Get(ctx, objKey, &cluster); err != nil {
return err
}
if err := syncer.AddConfigMapSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add configmap global syncer: " + err.Error())
}
if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add secret global syncer: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := syncer.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding ingress syncer controller")
if err := syncer.AddIngressSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add ingress syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := syncer.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add pod pvc controller: " + err.Error())
}
logger.Info("adding priorityclass controller")
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add priorityclass controller: " + err.Error())
}
return nil
}

View File

@@ -2,137 +2,129 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/pkg/log"
)
var (
configFile string
cfg config
logger *log.Logger
logger logr.Logger
debug bool
logFormat string
)
func main() {
app := cli.NewApp()
app.Name = "k3k-kubelet"
app.Usage = "virtual kubelet implementation k3k"
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "cluster-name",
Usage: "Name of the k3k cluster",
Destination: &cfg.ClusterName,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "cluster-namespace",
Usage: "Namespace of the k3k cluster",
Destination: &cfg.ClusterNamespace,
EnvVars: []string{"CLUSTER_NAMESPACE"},
},
&cli.StringFlag{
Name: "cluster-token",
Usage: "K3S token of the k3k cluster",
Destination: &cfg.Token,
EnvVars: []string{"CLUSTER_TOKEN"},
},
&cli.StringFlag{
Name: "host-config-path",
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
Destination: &cfg.HostConfigPath,
EnvVars: []string{"HOST_KUBECONFIG"},
},
&cli.StringFlag{
Name: "virtual-config-path",
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
Destination: &cfg.VirtualConfigPath,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "kubelet-port",
Usage: "kubelet API port number",
Destination: &cfg.KubeletPort,
EnvVars: []string{"SERVER_PORT"},
Value: "10250",
},
&cli.StringFlag{
Name: "service-name",
Usage: "The service name deployed by the k3k controller",
Destination: &cfg.ServiceName,
EnvVars: []string{"SERVICE_NAME"},
},
&cli.StringFlag{
Name: "agent-hostname",
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
Destination: &cfg.AgentHostname,
EnvVars: []string{"AGENT_HOSTNAME"},
},
&cli.StringFlag{
Name: "server-ip",
Usage: "Server IP used for registering the virtual kubelet to the cluster",
Destination: &cfg.ServerIP,
EnvVars: []string{"SERVER_IP"},
},
&cli.StringFlag{
Name: "version",
Usage: "Version of kubernetes server",
Destination: &cfg.Version,
EnvVars: []string{"VERSION"},
},
&cli.StringFlag{
Name: "config",
Usage: "Path to k3k-kubelet config file",
Destination: &configFile,
EnvVars: []string{"CONFIG_FILE"},
Value: "/etc/rancher/k3k/config.yaml",
},
&cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logging",
Destination: &debug,
EnvVars: []string{"DEBUG"},
},
}
app.Before = func(clx *cli.Context) error {
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
rootCmd := &cobra.Command{
Use: "k3k-kubelet",
Short: "virtual kubelet implementation k3k",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := InitializeConfig(cmd); err != nil {
return err
}
return nil
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrlruntimelog.SetLogger(logger)
return nil
},
RunE: run,
}
app.Action = run
if err := app.Run(os.Args); err != nil {
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Enable debug logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
if err := rootCmd.Execute(); err != nil {
logrus.Fatal(err)
}
}
func run(clx *cli.Context) error {
func run(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
}
if err := cfg.validate(); err != nil {
logger.Fatalw("failed to validate config", zap.Error(err))
return fmt.Errorf("failed to validate config: %w", err)
}
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
logger.Fatalw("failed to register new node", zap.Error(err))
if err := k.registerNode(k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}
k.start(ctx)
return nil
}
// InitializeConfig sets up viper to read from config file, environment variables, and flags.
// It uses a `flatcase` convention for viper keys to match the (lowercased) config file keys,
// while flags remain in kebab-case.
func InitializeConfig(cmd *cobra.Command) error {
var err error
// Bind every cobra flag to a viper key.
// The viper key will be the flag name with dashes removed (flatcase).
// e.g. "cluster-name" becomes "clustername"
cmd.Flags().VisitAll(func(f *pflag.Flag) {
configName := strings.ReplaceAll(f.Name, "-", "")
envName := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
err = errors.Join(err, viper.BindPFlag(configName, f))
err = errors.Join(err, viper.BindEnv(configName, envName))
})
if err != nil {
return err
}
configFile = viper.GetString("config")
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
var notFoundErr viper.ConfigFileNotFoundError
if errors.As(err, &notFoundErr) || errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("no config file found: %w", err)
} else {
return fmt.Errorf("failed to read config file: %w", err)
}
}
// Unmarshal all configuration into the global cfg struct.
// Viper correctly handles the precedence of flags > env > config.
if err := viper.Unmarshal(&cfg); err != nil {
return fmt.Errorf("failed to unmarshal config: %w", err)
}
// Separately get the debug flag, as it's not part of the main config struct.
debug = viper.GetBool("debug")
return nil
}
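A minimal, self-contained sketch of the binding convention described above (the demo command and flag are illustrative assumptions; the real binary wires this up for all of its persistent flags through InitializeConfig):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().String("cluster-name", "", "Name of the cluster")

	// Same convention as InitializeConfig: the flag "cluster-name" is bound to the
	// flatcase viper key "clustername" and the environment variable "CLUSTER_NAME".
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		key := strings.ReplaceAll(f.Name, "-", "")
		env := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
		_ = viper.BindPFlag(key, f)
		_ = viper.BindEnv(key, env)
	})

	// Precedence is flag > env > config file: with no --cluster-name flag set,
	// the environment variable wins over the (empty) flag default.
	os.Setenv("CLUSTER_NAME", "from-env")
	fmt.Println(viper.GetString("clustername")) // from-env
}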

View File

@@ -91,14 +91,20 @@ var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}
// DescribeWithStability implements compbasemetrics.StableCollector
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
ch <- nodeCPUUsageDesc
ch <- nodeMemoryUsageDesc
ch <- containerStartTimeDesc
ch <- containerCPUUsageDesc
ch <- containerMemoryUsageDesc
ch <- podCPUUsageDesc
ch <- podMemoryUsageDesc
ch <- resourceScrapeResultDesc
descs := []*compbasemetrics.Desc{
nodeCPUUsageDesc,
nodeMemoryUsageDesc,
containerStartTimeDesc,
containerCPUUsageDesc,
containerMemoryUsageDesc,
podCPUUsageDesc,
podMemoryUsageDesc,
resourceScrapeResultDesc,
}
for _, desc := range descs {
ch <- desc
}
}
// CollectWithStability implements compbasemetrics.StableCollector

View File

@@ -4,55 +4,71 @@ import (
"context"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: hostname,
},
{
Type: v1.NodeInternalIP,
Address: ip,
},
}
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
}
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Error(err, "error getting host node for mirroring")
}
}()
node.Spec = *hostNode.Spec.DeepCopy()
node.Status = *hostNode.Status.DeepCopy()
node.Labels = hostNode.GetLabels()
node.Annotations = hostNode.GetAnnotations()
node.Finalizers = hostNode.GetFinalizers()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
} else {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []corev1.NodeAddress{
{
Type: corev1.NodeHostName,
Address: hostname,
},
{
Type: corev1.NodeInternalIP,
Address: ip,
},
}
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error(err, "error updating node capacity")
}
}
}()
}
}
// nodeConditions returns the basic conditions which mark the node as ready
func nodeConditions() []v1.NodeCondition {
return []v1.NodeCondition{
func nodeConditions() []corev1.NodeCondition {
return []corev1.NodeCondition{
{
Type: "Ready",
Status: v1.ConditionTrue,
Status: corev1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletReady",
@@ -60,7 +76,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "OutOfDisk",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientDisk",
@@ -68,7 +84,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "MemoryPressure",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientMemory",
@@ -76,7 +92,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "DiskPressure",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasNoDiskPressure",
@@ -84,7 +100,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "NetworkUnavailable",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "RouteCreated",
@@ -95,9 +111,7 @@ func nodeConditions() []v1.NodeCondition {
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resources in the host nodes.
// If nodeLabels are specified, only the matching nodes will be considered.
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
ctx := context.Background()
func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
if err != nil {
return err
@@ -116,7 +130,7 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client
// getResourcesFromNodes will return the sum of the resource capacity of the host nodes, and the allocatable resources.
// If node labels are specified, only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
listOpts := metav1.ListOptions{}
if nodeLabels != nil {

View File

@@ -3,6 +3,7 @@ package provider
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"maps"
@@ -11,41 +12,37 @@ import (
"strings"
"time"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
dto "github.com/prometheus/client_model/go"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/utils/ptr"
"errors"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/transport/spdy"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
dto "github.com/prometheus/client_model/go"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -54,24 +51,22 @@ var _ nodeutil.Provider = (*Provider)(nil)
// Provider implements nodeutil.Provider from Virtual Kubelet.
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
type Provider struct {
Handler controller.ControllerHandler
Translator translate.ToHostTranslator
HostClient client.Client
VirtualClient client.Client
VirtualManager manager.Manager
ClientConfig rest.Config
CoreClient cv1.CoreV1Interface
ClusterNamespace string
ClusterName string
serverIP string
dnsIP string
logger *k3klog.Logger
logger logr.Logger
}
var (
ErrRetryTimeout = errors.New("provider timed out")
)
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
@@ -83,16 +78,9 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3
}
p := Provider{
Handler: controller.ControllerHandler{
Mgr: virtualMgr,
Scheme: *virtualMgr.GetScheme(),
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
Translator: translator,
Logger: logger,
},
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
VirtualManager: virtualMgr,
Translator: translator,
ClientConfig: hostConfig,
CoreClient: coreClient,
@@ -137,7 +125,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
return closer, err
}
@@ -211,9 +199,9 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
p.logger.V(1).Info("GetStatsSummary")
nodeList := &v1.NodeList{}
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
}
@@ -256,7 +244,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
return nil, err
}
podsNameMap := make(map[string]*v1.Pod)
podsNameMap := make(map[string]*corev1.Pod)
for _, pod := range pods {
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
@@ -329,7 +317,6 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
// should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
// so more work is needed to detect a close and handle that appropriately.
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
if err != nil {
return err
}
@@ -344,7 +331,14 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
// createPod takes a Kubernetes Pod and deploys it within the provider.
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod := pod.DeepCopy()
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
var sourcePod corev1.Pod
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
return err
}
tPod := sourcePod.DeepCopy()
p.Translator.TranslateTo(tPod)
// get Cluster definition
@@ -353,7 +347,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
@@ -373,21 +367,28 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
}
// if the priorityCluss for the virtual cluster is set then override the provided value
// if the priorityClass for the virtual cluster is set then override the provided value
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
if tPod.Spec.PriorityClassName != "" {
tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
tPod.Spec.PriorityClassName = tPriorityClassName
}
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
}
}
// fieldpath annotations
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
// host cluster version
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// sync serviceaccount token to the host cluster
@@ -395,10 +396,14 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Infow("creating pod",
p.logger.Info("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
@@ -412,7 +417,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}
// withRetry retries the passed function with an interval and timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
const (
interval = 2 * time.Second
timeout = 10 * time.Second
@@ -440,58 +445,22 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
// transformVolumes changes the volumes to their representation in the host cluster. It returns an error
// if one or more volumes couldn't be transformed
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
for _, volume := range volumes {
var optional bool
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
if volume.ConfigMap != nil {
if volume.ConfigMap.Optional != nil {
optional = *volume.ConfigMap.Optional
}
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
}
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
} else if volume.Secret != nil {
if volume.Secret.Optional != nil {
optional = *volume.Secret.Optional
}
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
}
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
if source.ConfigMap.Optional != nil {
optional = *source.ConfigMap.Optional
}
configMapName := source.ConfigMap.Name
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
}
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
} else if source.Secret != nil {
if source.Secret.Optional != nil {
optional = *source.Secret.Optional
}
secretName := source.Secret.Name
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
}
source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
}
}
} else if volume.PersistentVolumeClaim != nil {
@@ -514,71 +483,20 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
return nil
}
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
var configMap corev1.ConfigMap
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: configMapName,
}
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
// check if its optional configmap
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
var secret corev1.Secret
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: secretName,
}
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
if err := p.Handler.AddResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// UpdatePod executes updatePod with retry
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.updatePod, pod)
}
func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
p.logger.Debugw("got a request for update pod")
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.V(1).Info("got a request for update pod")
// Once scheduled, a Pod cannot update fields other than the images of its containers and init containers, and a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
// Update Pod in the virtual cluster
var currentVirtualPod v1.Pod
var currentVirtualPod corev1.Pod
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
}
@@ -601,13 +519,18 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
p.logger.Error(err, "error when updating ephemeral containers")
return err
}
return nil
}
// fieldpath annotations
if err := p.configureFieldPathEnv(&currentVirtualPod, &currentHostPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -642,7 +565,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
}
// updateContainerImages will update the images of the original containers that have the same name
func updateContainerImages(original, updated []v1.Container) []v1.Container {
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
newImages := make(map[string]string)
for _, c := range updated {
@@ -667,85 +590,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Infof("Got request to delete pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
// note that we don't return an error here. The pod was successfully deleted, another process
// should clean this without affecting the user
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
}
p.logger.Infof("Deleted pod %s", pod.Name)
return nil
}
// pruneUnusedVolumes removes volumes in use by pod that aren't used by any other pods
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
// since this pod was removed, originally mark all of the secrets/configmaps it uses as eligible
// for pruning
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
var pods corev1.PodList
// only pods in the same namespace could be using secrets/configmaps that this pod is using
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
Namespace: pod.Namespace,
})
if err != nil {
return fmt.Errorf("unable to list pods: %w", err)
}
for _, vPod := range pods.Items {
if vPod.Name == pod.Name {
continue
}
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
pruneSecrets.Delete(secrets...)
pruneConfigMap.Delete(configMaps...)
}
for _, secretName := range pruneSecrets.UnsortedList() {
var secret corev1.Secret
key := types.NamespacedName{
Name: secretName,
Namespace: pod.Namespace,
}
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
}
for _, configMapName := range pruneConfigMap.UnsortedList() {
var configMap corev1.ConfigMap
key := types.NamespacedName{
Name: configMapName,
Namespace: pod.Namespace,
}
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
}
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
@@ -755,7 +613,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
@@ -777,14 +635,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
}
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
return pod.Status.DeepCopy(), nil
}
@@ -804,8 +662,8 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
selector = selector.Add(*requirement)
var podList corev1.PodList
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("unable to list pods: %w", err)
}
@@ -848,7 +706,7 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
"svc.cluster.local",
"cluster.local",
},
Options: []v1.PodDNSConfigOption{
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: ptr.To("5"),
@@ -866,22 +724,22 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
// inject networking information to the pod's environment variables
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
pod.Spec.Containers[i].Env = mergeEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
pod.Spec.InitContainers[i].Env = mergeEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
}
// handle ephemeral containers as well
for i := range pod.Spec.EphemeralContainers {
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
pod.Spec.EphemeralContainers[i].Env = mergeEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
}
}
// overrideEnvVars will override the orig environment variables if found in the updated list
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
// mergeEnvVars will override the orig environment variables if found in the updated list and will add them to the list if not found
func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
if len(updated) == 0 {
return orig
}
@@ -892,46 +750,26 @@ func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
}
for i, origEnvVar := range orig {
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
orig[i] = updatedEnvVar
for i, env := range orig {
if updatedEnv, ok := updatedEnvVarMap[env.Name]; ok {
orig[i] = updatedEnv
// Remove the updated variable from the map
delete(updatedEnvVarMap, env.Name)
}
}
// Any variables remaining in the map are new and should be appended to the original slice.
for _, env := range updatedEnvVarMap {
orig = append(orig, env)
}
return orig
}
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for removing/seeing which virtual cluster resources need to be in the host cluster.
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
var (
secrets []string
configMaps []string
)
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil {
secrets = append(secrets, volume.Secret.SecretName)
} else if volume.ConfigMap != nil {
configMaps = append(configMaps, volume.ConfigMap.Name)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
configMaps = append(configMaps, source.ConfigMap.Name)
} else if source.Secret != nil {
secrets = append(secrets, source.Secret.Name)
}
}
}
}
return secrets, configMaps
}
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
// to assign env fieldpaths to pods; it also makes sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
for _, container := range pod.Spec.EphemeralContainers {
addFieldPathAnnotationToEnv(container.Env)
}
@@ -951,10 +789,10 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
Name: envName,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: value,
},
},
@@ -967,7 +805,7 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return nil
}
func addFieldPathAnnotationToEnv(envVars []v1.EnvVar) {
func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
for j, envVar := range envVars {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue

View File

@@ -5,10 +5,9 @@ import (
"testing"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
func Test_overrideEnvVars(t *testing.T) {
func Test_mergeEnvVars(t *testing.T) {
type args struct {
orig []corev1.EnvVar
new []corev1.EnvVar
@@ -22,49 +21,49 @@ func Test_overrideEnvVars(t *testing.T) {
{
name: "orig and new are empty",
args: args{
orig: []v1.EnvVar{},
new: []v1.EnvVar{},
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{},
},
want: []v1.EnvVar{},
want: []corev1.EnvVar{},
},
{
name: "only orig is empty",
args: args{
orig: []v1.EnvVar{},
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []v1.EnvVar{},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig has a matching element",
args: args{
orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}},
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
orig: []corev1.EnvVar{{Name: "FOO", Value: "old_val"}},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig have multiple elements",
args: args{
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
},
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
},
{
name: "orig and new have multiple elements and some not matching",
args: args{
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
if got := mergeEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("mergeEnvVars() = %v, want %v", got, tt.want)
}
})
}

View File

@@ -5,12 +5,14 @@ import (
"fmt"
"strings"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
const (
@@ -21,7 +23,7 @@ const (
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, like in Rancher-imported clusters
@@ -144,7 +146,8 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
}
func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: tokenVolumeName,
VolumeSource: corev1.VolumeSource{

View File

@@ -4,8 +4,11 @@ import (
"encoding/hex"
"strings"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
const (
@@ -33,6 +36,13 @@ type ToHostTranslator struct {
ClusterNamespace string
}
func NewHostTranslator(cluster *v1beta1.Cluster) *ToHostTranslator {
return &ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
}
// Translate translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
@@ -124,3 +134,11 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
return controller.SafeConcatName(namePrefix, nameSuffix)
}
// NamespacedName returns the types.NamespacedName of the resource in the host cluster
func (t *ToHostTranslator) NamespacedName(obj client.Object) types.NamespacedName {
return types.NamespacedName{
Namespace: t.ClusterNamespace,
Name: t.TranslateName(obj.GetNamespace(), obj.GetName()),
}
}
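A short usage sketch of the helpers added above, assuming the translate package path shown in the provider imports; the cluster and secret values are illustrative assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
	cluster := &v1beta1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "k3k-tenant"},
	}

	translator := translate.NewHostTranslator(cluster)

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "app-secret", Namespace: "default"},
	}

	// NamespacedName points at the synced copy in the host cluster: the namespace
	// is the cluster's namespace and the name is the prefixed/hashed result of
	// TranslateName for the virtual namespace/name pair.
	fmt.Println(translator.NamespacedName(secret))
}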

194
main.go
View File

@@ -6,112 +6,89 @@ import (
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/rancher/k3k/pkg/log"
)
var (
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
k3SImage string
k3SImagePullPolicy string
debug bool
logger *log.Logger
flags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &kubeconfig,
},
&cli.StringFlag{
Name: "cluster-cidr",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy",
Destination: &clusterCIDR,
},
&cli.StringFlag{
Name: "shared-agent-image",
EnvVars: []string{"SHARED_AGENT_IMAGE"},
Usage: "K3K Virtual Kubelet image",
Value: "rancher/k3k:latest",
Destination: &sharedAgentImage,
},
&cli.StringFlag{
Name: "shared-agent-pull-policy",
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
Destination: &sharedAgentImagePullPolicy,
},
&cli.BoolFlag{
Name: "debug",
EnvVars: []string{"DEBUG"},
Usage: "Debug level logging",
Destination: &debug,
},
&cli.StringFlag{
Name: "k3s-image",
EnvVars: []string{"K3S_IMAGE"},
Usage: "K3K server image",
Value: "rancher/k3k",
Destination: &k3SImage,
},
&cli.StringFlag{
Name: "k3s-image-pull-policy",
EnvVars: []string{"K3S_IMAGE_PULL_POLICY"},
Usage: "K3K server image pull policy",
Destination: &k3SImagePullPolicy,
},
}
scheme = runtime.NewScheme()
config cluster.Config
kubeconfig string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logFormat string
logger logr.Logger
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
}
func main() {
app := cmds.NewApp()
app.Flags = flags
app.Action = run
app.Version = buildinfo.Version
app.Before = func(clx *cli.Context) error {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
rootCmd := &cobra.Command{
Use: "k3k",
Short: "k3k controller",
Version: buildinfo.Version,
PreRunE: func(cmd *cobra.Command, args []string) error {
return validate()
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = zapr.NewLogger(log.New(debug, logFormat))
},
RunE: run,
}
if err := app.Run(os.Args); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "agent-shared-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
rootCmd.PersistentFlags().StringSliceVar(&config.AgentImagePullSecrets, "agent-image-pull-secret", nil, "Image pull secret used for agents")
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
if err := rootCmd.Execute(); err != nil {
logger.Error(err, "failed to run k3k controller")
}
}
func run(clx *cli.Context) error {
ctx := context.Background()
func run(cmd *cobra.Command, args []string) error {
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
ctrlruntimelog.SetLogger(logger)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
@@ -121,43 +98,64 @@ func run(clx *cli.Context) error {
mgr, err := ctrl.NewManager(restConfig, manager.Options{
Scheme: scheme,
})
if err != nil {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
if err != nil {
return err
}
logger.Info("adding etcd pod controller")
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
if err := mgr.Add(runnable); err != nil {
return err
}
if err := cluster.AddPodController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add cluster controller: %v", err)
}
logger.Info("adding statefulset controller")
if err := cluster.AddStatefulSetController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add statefulset controller: %v", err)
}
logger.Info("adding service controller")
if err := cluster.AddServiceController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add service controller: %v", err)
}
logger.Info("adding pod controller")
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add pod controller: %v", err)
}
logger.Info("adding clusterpolicy controller")
if err := policy.Add(mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add clusterpolicy controller: %v", err)
}
if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("failed to start the manager: %v", err)
return fmt.Errorf("failed to start manager: %v", err)
}
logger.Info("controller manager stopped")
return nil
}
func validate() error {
if sharedAgentImagePullPolicy != "" {
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
sharedAgentImagePullPolicy != string(v1.PullNever) {
if config.SharedAgentImagePullPolicy != "" {
if config.SharedAgentImagePullPolicy != string(v1.PullAlways) &&
config.SharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
config.SharedAgentImagePullPolicy != string(v1.PullNever) {
return errors.New("invalid value for shared agent image policy")
}
}

View File

@@ -1,5 +1,3 @@
package k3k
var (
GroupName = "k3k.io"
)
var GroupName = "k3k.io"

View File

@@ -1,3 +1,3 @@
// +k8s:deepcopy-gen=package
// +groupName=k3k.io
package v1alpha1
package v1beta1

View File

@@ -1,14 +1,16 @@
package v1alpha1
package v1beta1
import (
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
)
var (
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"}
SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemBuilder.AddToScheme
)

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
v1 "k8s.io/api/core/v1"
@@ -11,6 +11,7 @@ import (
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.mode",name=Mode,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.policyName",name=Policy,type=string
// Cluster defines a virtual Kubernetes cluster managed by k3k.
@@ -28,6 +29,7 @@ type Cluster struct {
// Status reflects the observed state of the Cluster.
//
// +kubebuilder:default={}
// +optional
Status ClusterStatus `json:"status,omitempty"`
}
@@ -39,7 +41,7 @@ type ClusterSpec struct {
// If not specified, the Kubernetes version of the host node will be used.
//
// +optional
Version string `json:"version"`
Version string `json:"version,omitempty"`
// Mode specifies the cluster provisioning mode: "shared" or "virtual".
// Defaults to "shared". This field is immutable.
@@ -95,12 +97,13 @@ type ClusterSpec struct {
// Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
// A default StorageClass is required for dynamic persistence.
//
// +kubebuilder:default={type: "dynamic"}
Persistence PersistenceConfig `json:"persistence,omitempty"`
// +optional
Persistence PersistenceConfig `json:"persistence"`
// Expose specifies options for exposing the API server.
// By default, it's only exposed as a ClusterIP.
//
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
// +optional
Expose *ExposeConfig `json:"expose,omitempty"`
@@ -120,7 +123,7 @@ type ClusterSpec struct {
// The Secret must have a "token" field in its data.
//
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef,omitempty"`
// TLSSANs specifies subject alternative names for the K3s server certificate.
//
@@ -163,6 +166,147 @@ type ClusterSpec struct {
//
// +optional
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
// MirrorHostNodes controls whether node objects from the host cluster
// are mirrored into the virtual cluster.
//
// +optional
MirrorHostNodes bool `json:"mirrorHostNodes,omitempty"`
// CustomCAs specifies the cert/key pairs for custom CA certificates.
//
// +optional
CustomCAs *CustomCAs `json:"customCAs,omitempty"`
// Sync specifies the resource types that will be synced from the virtual cluster to the host cluster.
//
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// SyncConfig contains the resource types that should be synced from the virtual cluster to the host cluster.
type SyncConfig struct {
// Services resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
// +optional
Services ServiceSyncConfig `json:"services"`
// ConfigMaps resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
// +optional
ConfigMaps ConfigMapSyncConfig `json:"configMaps"`
// Secrets resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
// +optional
Secrets SecretSyncConfig `json:"secrets"`
// Ingresses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
// +optional
Ingresses IngressSyncConfig `json:"ingresses"`
// PersistentVolumeClaims resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
// +optional
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims"`
// PriorityClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
// +optional
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
}
// SecretSyncConfig specifies the sync options for secrets.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=true
// +optional
Enabled bool `json:"enabled,omitempty"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ServiceSyncConfig specifies the sync options for services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ConfigMapSyncConfig specifies the sync options for config maps.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// IngressSyncConfig specifies the sync options for ingresses.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// PersistentVolumeClaimSyncConfig specifies the sync options for persistent volume claims.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// PriorityClassSyncConfig specifies the sync options for priority classes.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
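A sketch of how these sync options fit into a Cluster object, assuming the standard TypeMeta/ObjectMeta embedding on v1beta1.Cluster; the cluster names and the "k3k.io/sync" label key are illustrative assumptions, not values defined by the API:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
	// Keep the defaults that are enabled, restrict secret syncing to labelled
	// secrets, and additionally turn on ingress syncing.
	cluster := &v1beta1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant-a", Namespace: "k3k-tenant-a"},
		Spec: v1beta1.ClusterSpec{
			Sync: &v1beta1.SyncConfig{
				Services:   v1beta1.ServiceSyncConfig{Enabled: true},
				ConfigMaps: v1beta1.ConfigMapSyncConfig{Enabled: true},
				Secrets: v1beta1.SecretSyncConfig{
					Enabled:  true,
					Selector: map[string]string{"k3k.io/sync": "true"},
				},
				Ingresses:              v1beta1.IngressSyncConfig{Enabled: true},
				PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{Enabled: true},
				PriorityClasses:        v1beta1.PriorityClassSyncConfig{Enabled: false},
			},
		},
	}

	fmt.Println(cluster.Name, cluster.Spec.Sync.Secrets.Selector)
}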
// ClusterMode is the possible provisioning mode of a Cluster.
@@ -206,7 +350,7 @@ type PersistenceConfig struct {
// Type specifies the persistence mode.
//
// +kubebuilder:default="dynamic"
Type PersistenceMode `json:"type"`
Type PersistenceMode `json:"type,omitempty"`
// StorageClassName is the name of the StorageClass to use for the PVC.
// This field is only relevant in "dynamic" mode.
@@ -217,6 +361,7 @@ type PersistenceConfig struct {
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="2G"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
@@ -231,7 +376,7 @@ type ExposeConfig struct {
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
//
// +optional
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
LoadBalancer *LoadBalancerConfig `json:"loadBalancer,omitempty"`
// NodePort specifies options for exposing the API server through NodePort.
//
@@ -286,6 +431,49 @@ type NodePortConfig struct {
ETCDPort *int32 `json:"etcdPort,omitempty"`
}
// CustomCAs specifies the cert/key pairs for custom CA certificates.
type CustomCAs struct {
// Enabled toggles this feature on or off.
//
// +kubebuilder:default=true
Enabled bool `json:"enabled"`
// Sources defines the sources for all required custom CA certificates.
Sources CredentialSources `json:"sources"`
}
// CredentialSources lists all the required credentials, including both
// TLS key pairs and single signing keys.
type CredentialSources struct {
// ServerCA specifies the server-ca cert/key pair.
ServerCA CredentialSource `json:"serverCA"`
// ClientCA specifies the client-ca cert/key pair.
ClientCA CredentialSource `json:"clientCA"`
// RequestHeaderCA specifies the request-header-ca cert/key pair.
RequestHeaderCA CredentialSource `json:"requestHeaderCA"`
// ETCDServerCA specifies the etcd-server-ca cert/key pair.
ETCDServerCA CredentialSource `json:"etcdServerCA"`
// ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
ETCDPeerCA CredentialSource `json:"etcdPeerCA"`
// ServiceAccountToken specifies the service-account-token key.
ServiceAccountToken CredentialSource `json:"serviceAccountToken"`
}
// CredentialSource defines where to get a credential from.
// It can represent either a TLS key pair or a single private key.
type CredentialSource struct {
// SecretName specifies the name of an existing secret to use.
// The controller expects specific keys inside based on the credential type:
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
// - For ServiceAccountTokenKey: 'tls.key'.
SecretName string `json:"secretName"`
}
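A minimal sketch of wiring CustomCAs to pre-created Secrets (illustrative only, not part of this diff; the Secret names are hypothetical and only need to carry the keys described above):
// Each TLS pair Secret is expected to contain 'tls.crt' and 'tls.key';
// the service-account token Secret only needs 'tls.key'.
var exampleCustomCAs = CustomCAs{
	Enabled: true,
	Sources: CredentialSources{
		ServerCA:            CredentialSource{SecretName: "my-server-ca"},
		ClientCA:            CredentialSource{SecretName: "my-client-ca"},
		RequestHeaderCA:     CredentialSource{SecretName: "my-request-header-ca"},
		ETCDServerCA:        CredentialSource{SecretName: "my-etcd-server-ca"},
		ETCDPeerCA:          CredentialSource{SecretName: "my-etcd-peer-ca"},
		ServiceAccountToken: CredentialSource{SecretName: "my-sa-token"},
	},
}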
// ClusterStatus reflects the observed state of a Cluster.
type ClusterStatus struct {
// HostVersion is the Kubernetes version of the host node.
@@ -313,17 +501,46 @@ type ClusterStatus struct {
// +optional
TLSSANs []string `json:"tlsSANs,omitempty"`
// Persistence specifies options for persisting etcd data.
//
// +optional
Persistence PersistenceConfig `json:"persistence,omitempty"`
// PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
//
// +optional
PolicyName string `json:"policyName,omitempty"`
// KubeletPort specifies the port used by k3k-kubelet in shared mode.
//
// +optional
KubeletPort int `json:"kubeletPort,omitempty"`
// WebhookPort specifies the port used by the webhook in k3k-kubelet in shared mode.
//
// +optional
WebhookPort int `json:"webhookPort,omitempty"`
// Conditions are the individual status conditions for the cluster.
//
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// Phase is a high-level summary of the cluster's current lifecycle state.
//
// +kubebuilder:default="Unknown"
// +kubebuilder:validation:Enum=Pending;Provisioning;Ready;Failed;Terminating;Unknown
// +optional
Phase ClusterPhase `json:"phase,omitempty"`
}
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
type ClusterPhase string
const (
ClusterPending = ClusterPhase("Pending")
ClusterProvisioning = ClusterPhase("Provisioning")
ClusterReady = ClusterPhase("Ready")
ClusterFailed = ClusterPhase("Failed")
ClusterTerminating = ClusterPhase("Terminating")
ClusterUnknown = ClusterPhase("Unknown")
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
@@ -362,7 +579,6 @@ type VirtualClusterPolicy struct {
// VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
type VirtualClusterPolicySpec struct {
// Quota specifies the resource limits for clusters within a VirtualClusterPolicy.
//
// +optional
@@ -400,6 +616,12 @@ type VirtualClusterPolicySpec struct {
//
// +optional
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
// Sync specifies the resource types that will be synced from the virtual cluster to the host cluster.
//
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.

View File

@@ -2,7 +2,7 @@
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
"k8s.io/api/core/v1"
@@ -163,6 +163,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val.DeepCopy()
}
}
if in.CustomCAs != nil {
in, out := &in.CustomCAs, &out.CustomCAs
*out = new(CustomCAs)
**out = **in
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -183,7 +193,13 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -196,6 +212,80 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapSyncConfig) DeepCopyInto(out *ConfigMapSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapSyncConfig.
func (in *ConfigMapSyncConfig) DeepCopy() *ConfigMapSyncConfig {
if in == nil {
return nil
}
out := new(ConfigMapSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSource.
func (in *CredentialSource) DeepCopy() *CredentialSource {
if in == nil {
return nil
}
out := new(CredentialSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSources) DeepCopyInto(out *CredentialSources) {
*out = *in
out.ServerCA = in.ServerCA
out.ClientCA = in.ClientCA
out.RequestHeaderCA = in.RequestHeaderCA
out.ETCDServerCA = in.ETCDServerCA
out.ETCDPeerCA = in.ETCDPeerCA
out.ServiceAccountToken = in.ServiceAccountToken
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSources.
func (in *CredentialSources) DeepCopy() *CredentialSources {
if in == nil {
return nil
}
out := new(CredentialSources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCAs) DeepCopyInto(out *CustomCAs) {
*out = *in
out.Sources = in.Sources
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCAs.
func (in *CustomCAs) DeepCopy() *CustomCAs {
if in == nil {
return nil
}
out := new(CustomCAs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
*out = *in
@@ -248,6 +338,28 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSyncConfig) DeepCopyInto(out *IngressSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSyncConfig.
func (in *IngressSyncConfig) DeepCopy() *IngressSyncConfig {
if in == nil {
return nil
}
out := new(IngressSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
*out = *in
@@ -318,6 +430,115 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimSyncConfig) DeepCopyInto(out *PersistentVolumeClaimSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSyncConfig.
func (in *PersistentVolumeClaimSyncConfig) DeepCopy() *PersistentVolumeClaimSyncConfig {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClassSyncConfig) DeepCopyInto(out *PriorityClassSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassSyncConfig.
func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
if in == nil {
return nil
}
out := new(PriorityClassSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSyncConfig.
func (in *SecretSyncConfig) DeepCopy() *SecretSyncConfig {
if in == nil {
return nil
}
out := new(SecretSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSyncConfig) DeepCopyInto(out *ServiceSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSyncConfig.
func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
if in == nil {
return nil
}
out := new(ServiceSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
*out = *in
in.Services.DeepCopyInto(&out.Services)
in.ConfigMaps.DeepCopyInto(&out.ConfigMaps)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Ingresses.DeepCopyInto(&out.Ingresses)
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
func (in *SyncConfig) DeepCopy() *SyncConfig {
if in == nil {
return nil
}
out := new(SyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
*out = *in
@@ -402,6 +623,11 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.

View File

@@ -4,13 +4,15 @@ import (
"context"
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
const (
@@ -22,12 +24,12 @@ type ResourceEnsurer interface {
}
type Config struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client ctrlruntimeclient.Client
scheme *runtime.Scheme
}
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
return &Config{
cluster: cluster,
client: client,
@@ -40,11 +42,8 @@ func configSecretName(clusterName string) string {
}
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
log := ctrl.LoggerFrom(ctx).WithValues("key", key)
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
@@ -52,11 +51,15 @@ func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object
if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj))
return cfg.client.Update(ctx, obj)
}
return err
}
log.V(1).Info(fmt.Sprintf("Creating %T.", obj))
return nil
}

View File

@@ -0,0 +1,253 @@
package agent
import (
"context"
"fmt"
"os"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
kubeletPortRangeConfigMapName = "k3k-kubelet-port-range"
webhookPortRangeConfigMapName = "k3k-webhook-port-range"
rangeKey = "range"
allocatedPortsKey = "allocatedPorts"
snapshotDataKey = "snapshotData"
)
type PortAllocator struct {
ctrlruntimeclient.Client
KubeletCM *v1.ConfigMap
WebhookCM *v1.ConfigMap
}
func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*PortAllocator, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("starting port allocator")
portRangeConfigMapNamespace := os.Getenv("CONTROLLER_NAMESPACE")
if portRangeConfigMapNamespace == "" {
return nil, fmt.Errorf("failed to find k3k controller namespace")
}
var kubeletPortRangeCM, webhookPortRangeCM v1.ConfigMap
kubeletPortRangeCM.Name = kubeletPortRangeConfigMapName
kubeletPortRangeCM.Namespace = portRangeConfigMapNamespace
webhookPortRangeCM.Name = webhookPortRangeConfigMapName
webhookPortRangeCM.Namespace = portRangeConfigMapNamespace
return &PortAllocator{
Client: client,
KubeletCM: &kubeletPortRangeCM,
WebhookCM: &webhookPortRangeCM,
}, nil
}
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange, webhookPortRange string) manager.Runnable {
return manager.RunnableFunc(func(ctx context.Context) error {
if err := a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange); err != nil {
return err
}
if err := a.getOrCreate(ctx, a.WebhookCM, webhookPortRange); err != nil {
return err
}
return nil
})
}
func (a *PortAllocator) getOrCreate(ctx context.Context, configmap *v1.ConfigMap, portRange string) error {
nn := types.NamespacedName{
Name: configmap.Name,
Namespace: configmap.Namespace,
}
if err := a.Get(ctx, nn, configmap); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
// creating the configMap for the first time
configmap.Data = map[string]string{
rangeKey: portRange,
allocatedPortsKey: "",
}
configmap.BinaryData = map[string][]byte{
snapshotDataKey: []byte(""),
}
if err := a.Create(ctx, configmap); err != nil {
return fmt.Errorf("failed to create port range configmap: %w", err)
}
}
return nil
}
func (a *PortAllocator) AllocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM)
}
func (a *PortAllocator) DeallocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string, webhookPort int) error {
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM, webhookPort)
}
func (a *PortAllocator) AllocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM)
}
func (a *PortAllocator) DeallocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string, kubeletPort int) error {
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM, kubeletPort)
}
// allocatePort assigns a port to the cluster from the port range configured for k3k
func (a *PortAllocator) allocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap) (int, error) {
portRange, ok := configMap.Data[rangeKey]
if !ok {
return 0, fmt.Errorf("port range is not initialized")
}
// get configMap first to avoid conflicts
if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
return 0, err
}
clusterNamespaceName := clusterNamespace + "/" + clusterName
portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
if err != nil {
return 0, err
}
if _, ok := portsMap[clusterNamespaceName]; ok {
return portsMap[clusterNamespaceName], nil
}
// allocate a new port and save the snapshot
snapshot := core.RangeAllocation{
Range: configMap.Data[rangeKey],
Data: configMap.BinaryData[snapshotDataKey],
}
pa, err := portallocator.NewFromSnapshot(&snapshot)
if err != nil {
return 0, err
}
next, err := pa.AllocateNext()
if err != nil {
return 0, err
}
portsMap[clusterNamespaceName] = next
if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
return 0, err
}
if err := a.Update(ctx, configMap); err != nil {
return 0, err
}
return next, nil
}
// deallocatePort releases the port used by the cluster back to the port range
func (a *PortAllocator) deallocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap, port int) error {
portRange, ok := configMap.Data[rangeKey]
if !ok {
return fmt.Errorf("port range is not initialized")
}
if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
return err
}
clusterNamespaceName := clusterNamespace + "/" + clusterName
portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
if err != nil {
return err
}
// check if the cluster already exists in the configMap
if usedPort, ok := portsMap[clusterNamespaceName]; ok {
if usedPort != port {
return fmt.Errorf("port %d does not match used port %d for the cluster", port, usedPort)
}
snapshot := core.RangeAllocation{
Range: configMap.Data[rangeKey],
Data: configMap.BinaryData[snapshotDataKey],
}
pa, err := portallocator.NewFromSnapshot(&snapshot)
if err != nil {
return err
}
if err := pa.Release(port); err != nil {
return err
}
delete(portsMap, clusterNamespaceName)
if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
return err
}
}
return a.Update(ctx, configMap)
}
// parsePortMap converts the ConfigMap data into a map of namespace/name keys to allocated ports
func parsePortMap(portMapData string) (map[string]int, error) {
portMap := make(map[string]int)
if err := yaml.Unmarshal([]byte(portMapData), &portMap); err != nil {
return nil, fmt.Errorf("failed to parse allocatedPorts: %w", err)
}
return portMap, nil
}
// serializePortMap converts a map of namespace/name keys and allocated ports back into ConfigMap data
func serializePortMap(m map[string]int) (string, error) {
out, err := yaml.Marshal(m)
if err != nil {
return "", fmt.Errorf("failed to serialize allocatedPorts: %w", err)
}
return string(out), nil
}
func saveSnapshot(portAllocator *portallocator.PortAllocator, snapshot *core.RangeAllocation, configMap *v1.ConfigMap, portsMap map[string]int) error {
// save the new snapshot
if err := portAllocator.Snapshot(snapshot); err != nil {
return err
}
// update the configmap with the new portsMap and the new snapshot
configMap.BinaryData[snapshotDataKey] = snapshot.Data
configMap.Data[rangeKey] = snapshot.Range
allocatedPortsData, err := serializePortMap(portsMap)
if err != nil {
return err
}
configMap.Data[allocatedPortsKey] = allocatedPortsData
return nil
}
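For context, a minimal sketch of how the allocator above could be wired into a controller manager (assumption, not part of this diff; the port ranges and helper name are hypothetical):
// setupPortAllocator creates the allocator and registers the Runnable that
// lazily creates the backing ConfigMaps once the manager starts.
func setupPortAllocator(ctx context.Context, mgr manager.Manager, cl ctrlruntimeclient.Client) (*PortAllocator, error) {
	alloc, err := NewPortAllocator(ctx, cl)
	if err != nil {
		return nil, err
	}
	// Hypothetical ranges; per-cluster ports are later handed out with
	// AllocateKubeletPort / AllocateWebhookPort and returned with the
	// matching Deallocate* calls.
	if err := mgr.Add(alloc.InitPortAllocatorConfig(ctx, cl, "50000-50100", "50101-50200")); err != nil {
		return nil, err
	}
	return alloc, nil
}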

View File

@@ -8,41 +8,49 @@ import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/intstr"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
const (
sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml"
SharedNodeAgentName = "kubelet"
SharedNodeMode = "shared"
SharedNodeAgentName = "kubelet"
SharedNodeMode = "shared"
)
type SharedAgent struct {
*Config
serviceIP string
image string
imagePullPolicy string
token string
serviceIP string
image string
imagePullPolicy string
imageRegistry string
token string
kubeletPort int
webhookPort int
imagePullSecrets []string
}
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string) *SharedAgent {
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
return &SharedAgent{
Config: config,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
Config: config,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
kubeletPort: kubeletPort,
webhookPort: webhookPort,
imagePullSecrets: imagePullSecrets,
}
}
@@ -72,7 +80,7 @@ func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Ob
}
func (s *SharedAgent) config(ctx context.Context) error {
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP)
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort, s.webhookPort)
configSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -91,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) string {
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
@@ -101,9 +109,12 @@ func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) s
clusterNamespace: %s
serverIP: %s
serviceName: %s
token: %s
version: %s`,
cluster.Name, cluster.Namespace, ip, serviceName, token, version)
token: %v
mirrorHostNodes: %t
version: %s
webhookPort: %d
kubeletPort: %d`,
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, webhookPort, kubeletPort)
}
func (s *SharedAgent) daemonset(ctx context.Context) error {
@@ -140,7 +151,23 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
}
func (s *SharedAgent) podSpec() v1.PodSpec {
return v1.PodSpec{
hostNetwork := false
dnsPolicy := v1.DNSClusterFirst
if s.cluster.Spec.MirrorHostNodes {
hostNetwork = true
dnsPolicy = v1.DNSClusterFirstWithHostNet
}
image := s.image
if s.imageRegistry != "" {
image = s.imageRegistry + "/" + s.image
}
podSpec := v1.PodSpec{
HostNetwork: hostNetwork,
DNSPolicy: dnsPolicy,
ServiceAccountName: s.Name(),
NodeSelector: s.cluster.Spec.NodeSelector,
Volumes: []v1.Volume{
@@ -184,15 +211,11 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
Containers: []v1.Container{
{
Name: s.Name(),
Image: s.image,
Image: image,
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{},
},
Args: []string{
"--config",
sharedKubeletConfigPath,
},
Env: append([]v1.EnvVar{
{
Name: "AGENT_HOSTNAME",
@@ -203,6 +226,15 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
},
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
}, s.cluster.Spec.AgentEnvs...),
VolumeMounts: []v1.VolumeMount{
{
@@ -217,15 +249,25 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
Ports: []v1.ContainerPort{
{
Name: "kubelet-port",
Protocol: v1.ProtocolTCP,
ContainerPort: int32(s.kubeletPort),
},
{
Name: "webhook-port",
Protocol: v1.ProtocolTCP,
ContainerPort: 9443,
ContainerPort: int32(s.webhookPort),
},
},
},
},
}
for _, imagePullSecret := range s.imagePullSecrets {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
return podSpec
}
func (s *SharedAgent) service(ctx context.Context) error {
@@ -249,13 +291,13 @@ func (s *SharedAgent) service(ctx context.Context) error {
{
Name: "k3s-kubelet-port",
Protocol: v1.ProtocolTCP,
Port: 10250,
Port: int32(s.kubeletPort),
},
{
Name: "webhook-server",
Protocol: v1.ProtocolTCP,
Port: 9443,
TargetPort: intstr.FromInt32(9443),
Port: int32(s.webhookPort),
TargetPort: intstr.FromInt32(int32(s.webhookPort)),
},
},
},
@@ -339,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"networking.k8s.io"},
Resources: []string{"ingresses"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"k3k.io"},
Resources: []string{"clusters"},

View File

@@ -3,17 +3,21 @@ package agent
import (
"testing"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_sharedAgentData(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
serviceName string
ip string
kubeletPort int
webhookPort int
token string
}
@@ -25,15 +29,17 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "simple config",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -45,24 +51,29 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
name: "version in status",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},
ip: "10.0.0.21",
kubeletPort: 10250,
webhookPort: 9443,
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
},
@@ -73,20 +84,25 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
name: "missing version in spec",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -98,13 +114,16 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.3.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip)
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort)
data := make(map[string]string)
err := yaml.Unmarshal([]byte(config), data)

View File

@@ -5,12 +5,14 @@ import (
"errors"
"fmt"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/utils/ptr"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
const (
@@ -20,19 +22,22 @@ const (
type VirtualAgent struct {
*Config
serviceIP string
token string
k3SImage string
k3SImagePullPolicy string
serviceIP string
token string
Image string
ImagePullPolicy string
ImageRegistry string
imagePullSecrets []string
}
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
func NewVirtualAgent(config *Config, serviceIP, token, Image, ImagePullPolicy string, imagePullSecrets []string) *VirtualAgent {
return &VirtualAgent{
Config: config,
serviceIP: serviceIP,
token: token,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
Config: config,
serviceIP: serviceIP,
token: token,
Image: Image,
ImagePullPolicy: ImagePullPolicy,
imagePullSecrets: imagePullSecrets,
}
}
@@ -82,7 +87,7 @@ with-node-id: true`, serviceIP, token)
}
func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster, v.k3SImage)
image := controller.K3SImage(v.cluster, v.Image)
const name = "k3k-agent"
@@ -181,7 +186,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
{
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
ImagePullPolicy: v1.PullPolicy(v.ImagePullPolicy),
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},
@@ -241,5 +246,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
}
}
for _, imagePullSecret := range v.imagePullSecrets {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
return podSpec
}

View File

@@ -0,0 +1,35 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
v1 "k8s.io/api/core/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
// newVirtualClient creates a new Client that can be used to interact with the virtual cluster
func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) {
var clusterKubeConfig v1.Secret
kubeconfigSecretName := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"),
Namespace: clusterNamespace,
}
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
if err != nil {
return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err)
}
return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
}
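A short usage sketch for this helper (assumption, not part of this diff): obtain a client scoped to the virtual cluster and list resources through it.
// listVirtualPods is a hypothetical caller; hostClient is the host-cluster
// client already held by the reconciler.
func listVirtualPods(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*v1.PodList, error) {
	virtClient, err := newVirtualClient(ctx, hostClient, clusterName, clusterNamespace)
	if err != nil {
		return nil, err
	}
	var pods v1.PodList
	if err := virtClient.List(ctx, &pods); err != nil {
		return nil, err
	}
	return &pods, nil
}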

View File

@@ -5,93 +5,123 @@ import (
"errors"
"fmt"
"net"
"reflect"
"slices"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/policy"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/policy"
)
const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
maxConcurrentReconciles = 1
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
defaultStoragePersistentSize = "1G"
memberRemovalTimeout = time.Minute * 1
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
memberRemovalTimeout = time.Minute * 1
)
var (
ErrClusterValidation = errors.New("cluster validation error")
ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
)
type Config struct {
ClusterCIDR string
SharedAgentImage string
SharedAgentImagePullPolicy string
VirtualAgentImage string
VirtualAgentImagePullPolicy string
K3SServerImage string
K3SServerImagePullPolicy string
ServerImagePullSecrets []string
AgentImagePullSecrets []string
}
type ClusterReconciler struct {
DiscoveryClient *discovery.DiscoveryClient
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
SharedAgentImage string
SharedAgentImagePullPolicy string
K3SImage string
K3SImagePullPolicy string
DiscoveryClient *discovery.DiscoveryClient
Client client.Client
Scheme *runtime.Scheme
PortAllocator *agent.PortAllocator
record.EventRecorder
Config
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, k3SImage string, k3SImagePullPolicy string) error {
func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
}
if sharedAgentImage == "" {
if config.SharedAgentImage == "" {
return errors.New("missing shared agent image")
}
if eventRecorder == nil {
eventRecorder = mgr.GetEventRecorderFor(clusterController)
}
// initialize a new Reconciler
reconciler := ClusterReconciler{
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
K3SImage: k3SImage,
K3SImagePullPolicy: k3SImagePullPolicy,
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
EventRecorder: eventRecorder,
PortAllocator: portAllocator,
Config: Config{
SharedAgentImage: config.SharedAgentImage,
SharedAgentImagePullPolicy: config.SharedAgentImagePullPolicy,
VirtualAgentImage: config.VirtualAgentImage,
VirtualAgentImagePullPolicy: config.VirtualAgentImagePullPolicy,
K3SServerImage: config.K3SServerImage,
K3SServerImagePullPolicy: config.K3SServerImagePullPolicy,
ServerImagePullSecrets: config.ServerImagePullSecrets,
AgentImagePullSecrets: config.AgentImagePullSecrets,
},
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
@@ -118,7 +148,7 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
// Enqueue all the Clusters in the namespace
var clusterList v1alpha1.ClusterList
var clusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
@@ -131,34 +161,56 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
log := ctrl.LoggerFrom(ctx)
log.Info("Reconciling Cluster")
log.Info("reconciling cluster")
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, err
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// if DeletionTimestamp is not Zero -> finalize the object
if !cluster.DeletionTimestamp.IsZero() {
return c.finalizeCluster(ctx, cluster)
return c.finalizeCluster(ctx, &cluster)
}
// add finalizers
if !controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
log.V(1).Info("Updating Cluster status phase")
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonProvisioning,
Message: "Cluster is being provisioned",
})
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{Requeue: true}, nil
}
// add finalizer
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
log.V(1).Info("Updating Cluster adding finalizer")
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{Requeue: true}, nil
}
orig := cluster.DeepCopy()
reconcilerErr := c.reconcileCluster(ctx, &cluster)
// update Status if needed
if !reflect.DeepEqual(orig.Status, cluster.Status) {
if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
log.Info("Updating Cluster status")
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -167,7 +219,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
log.V(1).Info("Server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}
@@ -175,7 +227,9 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// update Cluster if needed
if !reflect.DeepEqual(orig.Spec, cluster.Spec) {
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
log.Info("Updating Cluster")
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -184,13 +238,39 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(ctx, cluster, err)
return err
}
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
var ns v1.Namespace
if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
return err
}
policyName, found := ns.Labels[policy.PolicyNameLabelKey]
cluster.Status.PolicyName = policyName
if found && policyName != "" {
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
return err
}
if err := c.validate(cluster, policy); err != nil {
return err
}
}
// If the Version is not specified, we will try to use the same Kubernetes version as the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
log.Info("cluster version not set")
log.V(1).Info("Cluster version not set. Using host version.")
hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
@@ -202,29 +282,17 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
cluster.Status.HostVersion = k8sVersion + "-k3s1"
}
// TODO: update status?
if err := c.validate(cluster); err != nil {
log.Error(err, "invalid change")
return nil
}
token, err := c.token(ctx, cluster)
if err != nil {
return err
}
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode), c.K3SImage, c.K3SImagePullPolicy)
cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
}
s := server.New(cluster, c.Client, token, c.K3SServerImage, c.K3SServerImagePullPolicy, c.ServerImagePullSecrets)
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
}
}
@@ -232,11 +300,10 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.V(1).Info("Looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {
log.Error(err, "error while looking up Cluster Service CIDR")
@@ -245,21 +312,13 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
}
// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.V(1).Info("assign default service CIDR for virtual mode")
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
}
}
var ns v1.Namespace
if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
return err
}
policyName := ns.Labels[policy.PolicyNameLabelKey]
cluster.Status.PolicyName = policyName
if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
return err
}
@@ -291,17 +350,17 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, token); err != nil {
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
return err
}
return c.bindNodeProxyClusterRole(ctx, cluster)
return c.bindClusterRoles(ctx, cluster)
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
log.V(1).Info("Ensuring bootstrap secret")
bootstrapData, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
if err != nil {
@@ -331,13 +390,13 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
}
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
log.V(1).Info("Ensuring Kubeconfig Secret")
adminKubeconfig := kubeconfig.New()
kubeconfig, err := adminKubeconfig.Generate(ctx, c.Client, cluster, serviceIP)
kubeconfig, err := adminKubeconfig.Generate(ctx, c.Client, cluster, serviceIP, port)
if err != nil {
return err
}
@@ -369,7 +428,7 @@ func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster
return err
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
if err != nil {
@@ -405,11 +464,11 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
log.V(1).Info("Ensuring network policy")
networkPolicyName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name)
networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)
// network policies are managed by the Policy -> delete the one created as a standalone cluster
if cluster.Status.PolicyName != "" {
@@ -420,12 +479,12 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
},
}
return ctrlruntimeclient.IgnoreNotFound(c.Client.Delete(ctx, netpol))
return client.IgnoreNotFound(c.Client.Delete(ctx, netpol))
}
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Name: controller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
TypeMeta: metav1.TypeMeta{
@@ -475,6 +534,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
}
currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
return err
@@ -484,26 +544,25 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
return nil
})
if err != nil {
return err
}
key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
log.V(1).Info("Cluster network policy updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
log.V(1).Info("Ensuring Cluster Service")
expectedService := server.Service(cluster)
currentService := expectedService.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentService, func() error {
if err := controllerutil.SetControllerReference(cluster, currentService, c.Scheme); err != nil {
return err
@@ -513,22 +572,21 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
return nil
})
if err != nil {
return nil, err
}
key := client.ObjectKeyFromObject(currentService)
if result != controllerutil.OperationResultNone {
log.Info("cluster service updated", "key", key, "result", result)
log.V(1).Info("Cluster service updated", "key", key, "result", result)
}
return currentService, nil
}
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
log.V(1).Info("Ensuring cluster ingress")
expectedServerIngress := server.Ingress(ctx, cluster)
@@ -539,6 +597,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
}
currentServerIngress := expectedServerIngress.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerIngress, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerIngress, c.Scheme); err != nil {
return err
@@ -549,20 +608,19 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
return nil
})
if err != nil {
return err
}
key := client.ObjectKeyFromObject(currentServerIngress)
if result != controllerutil.OperationResultNone {
log.Info("cluster ingress updated", "key", key, "result", result)
log.V(1).Info("Cluster ingress updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
// create headless service for the statefulset
@@ -582,6 +640,9 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
// Add the finalizer to the StatefulSet so the statefulset controller can handle cleanup.
controllerutil.AddFinalizer(expectedServerStatefulSet, etcdPodFinalizerName)
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
@@ -595,55 +656,95 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(currentServerStatefulSet)
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
log.V(1).Info("Ensuring server StatefulSet", "key", key, "result", result)
}
return err
}
func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
}
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
var err error
found := false
for _, clusterRole := range clusterRoles {
var clusterRoleBinding rbacv1.ClusterRoleBinding
if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil {
err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr))
continue
}
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
found = true
clusterSubject := rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
Name: controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName),
Namespace: cluster.Namespace,
}
if !slices.Contains(clusterRoleBinding.Subjects, clusterSubject) {
clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, clusterSubject)
if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil {
err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr))
}
}
}
if !found {
clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{
Kind: "ServiceAccount",
Name: subjectName,
Namespace: cluster.Namespace,
})
}
return c.Client.Update(ctx, clusterRoleBinding)
return err
}
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
config := agent.NewConfig(cluster, c.Client, c.Scheme)
var agentEnsurer agent.ResourceEnsurer
if cluster.Spec.Mode == agent.VirtualNodeMode {
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.VirtualAgentImage, c.VirtualAgentImagePullPolicy, c.AgentImagePullSecrets)
} else {
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
// Assign ports from the pool if the shared agent has host-node mirroring enabled
kubeletPort := 10250
webhookPort := 9443
if cluster.Spec.MirrorHostNodes {
var err error
kubeletPort, err = c.PortAllocator.AllocateKubeletPort(ctx, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
cluster.Status.KubeletPort = kubeletPort
webhookPort, err = c.PortAllocator.AllocateWebhookPort(ctx, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
cluster.Status.WebhookPort = webhookPort
}
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
}
return agentEnsurer.EnsureResources(ctx)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
if cluster.Name == ClusterInvalidName {
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
}
if cluster.Spec.Mode != policy.Spec.AllowedMode {
return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
}
if cluster.Spec.CustomCAs != nil && cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster.Spec.CustomCAs.Sources); err != nil {
return fmt.Errorf("%w: %w", ErrClusterValidation, err)
}
}
// validate sync policy
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
}
return nil
@@ -658,7 +759,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look up the serviceCIDR by creating a failing service.
// The error should contain the expected serviceCIDR
log.Info("looking up serviceCIDR from a failing service creation")
log.V(1).Info("Looking up Service CIDR from a failing service creation")
failingSvc := v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
@@ -670,7 +771,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
if len(splittedErrMsg) > 1 {
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from failing service creation: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
@@ -684,13 +785,13 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look up the kube-apiserver Pod, and check the '--service-cluster-ip-range' flag.
log.Info("looking up serviceCIDR from kube-apiserver pod")
log.V(1).Info("Looking up Service CIDR from kube-apiserver pod")
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{
matchingLabels := client.MatchingLabels(map[string]string{
"component": "kube-apiserver",
"tier": "control-plane",
})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: "kube-system"}
listOpts := &client.ListOptions{Namespace: "kube-system"}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
@@ -707,12 +808,12 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
for _, arg := range apiServerArgs {
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from kube-apiserver pod: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
log.Error(err, "serviceCIDR is not valid")
log.Error(err, "Service CIDR is not valid")
break
}
@@ -725,3 +826,17 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
return "", nil
}
// validateCustomCACerts makes sure that all the cert secrets exist
func (c *ClusterReconciler) validateCustomCACerts(credentialSources v1beta1.CredentialSources) error {
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||
credentialSources.ETCDPeerCA.SecretName == "" ||
credentialSources.ETCDServerCA.SecretName == "" ||
credentialSources.RequestHeaderCA.SecretName == "" ||
credentialSources.ServiceAccountToken.SecretName == "" {
return ErrCustomCACertSecretMissing
}
return nil
}
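Because validation failures wrap ErrClusterValidation, callers can distinguish them from transient errors with errors.Is; a minimal sketch under that assumption (the helper below is hypothetical and not part of this diff):
// validateAndRecord surfaces validation problems as Events instead of
// returning them for an aggressive requeue.
func (c *ClusterReconciler) validateAndRecord(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
	if err := c.validate(cluster, policy); err != nil {
		if errors.Is(err, ErrClusterValidation) {
			c.EventRecorder.Event(cluster, v1.EventTypeWarning, "ValidationFailed", err.Error())
			return nil
		}
		return err
	}
	return nil
}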

Some files were not shown because too many files have changed in this diff.