Compare commits

...

73 Commits

Author SHA1 Message Date
Enrico Candino
bd2494a0a9 Bump Charts to 0.3.4 (#446) 2025-08-28 10:57:23 +02:00
Enrico Candino
237a3cb280 Bump Charts to 0.3.4-rc3 (#445) 2025-08-25 19:02:30 +02:00
Enrico Candino
d23cf86fce Fix missing custom-certs flag in cli (#444)
* fix missing custom-certs path in cli

* fix docs
2025-08-25 18:37:29 +02:00
Enrico Candino
65cb8ad123 bump chart (#440) 2025-08-19 10:57:07 +02:00
Hussein Galal
6db88b5a00 Revert "Fix pod fieldpath annotation translation (#434)" (#435)
This reverts commit 883d401ae3.
2025-08-18 14:28:19 +03:00
Hussein Galal
8d89c7d133 Fix service port for generated kubeconfig secret (#433)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-08-18 09:17:30 +03:00
Hussein Galal
883d401ae3 Fix pod fieldpath annotation translation (#434)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-08-18 09:16:58 +03:00
Enrico Candino
f85702dc23 Bump version and appVersion to 0.3.4-rc1 in Chart.yaml (#429) 2025-07-24 17:13:12 +02:00
Enrico Candino
084701fcd9 Migrate from urfave/cli to cobra (#426)
* wip

* env var fix

* cluster create

* cluster create and delete

* cluster list

* cluster cmd

* kubeconfig

* policy create

* policy delete and list, and added root commands

* removed urfavecli from k3kcli

* fix policy command

* k3k-kubelet to cobra

* updated docs

* updated go.mod

* updated test

* added deletion

* added cleanup and flake attempts

* wip bind env

* simplified config
2025-07-24 16:49:40 +02:00
Enrico Candino
5eb1d2a5bb Adding some tests for k3kcli (#417)
* adding some cli tests

* added coverage and tests

* fix lint and cli tests

* fix defer

* some more cli tests
2025-07-23 11:03:41 +02:00
Enrico Candino
98d17cdb50 Added new golangci-lint formatters (#425)
* add gci formatter

* gofmt and gofumpt

* rewrite rule

* added make fmt
2025-07-22 10:42:41 +02:00
Enrico Candino
2047a600ed Migrate golangci-lint to v2 (#424)
* golangci-lint upgrade

* fix lint
2025-07-22 10:10:26 +02:00
Hussein Galal
a98c49b59a Adding custom certificate to the virtual clusters (#409)
* Adding custom certificate to the virtual clusters

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* integrate cert-manager

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add individual cert tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-07-21 19:23:11 +03:00
Enrico Candino
1048e3f82d fix for portallocator initialization (#423) 2025-07-21 17:03:39 +02:00
Alex Bissessur
c480bc339e update ver for k3kcli install (#421)
Signed-off-by: xelab04 <alexbissessur@gmail.com>
2025-07-21 11:18:34 +02:00
Enrico Candino
a0af20f20f codecov (#418) 2025-07-18 11:50:57 +02:00
Enrico Candino
748a439d7a fix for restoring policy (#413) 2025-07-17 10:25:09 +02:00
Enrico Candino
0a55bec305 improve chart-release workflow (#412) 2025-07-14 15:56:30 +02:00
Enrico Candino
2ab71df139 Add Conditions and current status to Cluster (#408)
* Added Cluster Conditions

* added e2e tests

* fix lint

* cli polling

* update tests
2025-07-14 15:53:37 +02:00
Enrico Candino
753b31b52a Adding configurable maxConcurrentReconcilers and small CRD cleanup (#410)
* removed Persistence from Status, fixed default for StorageSize and StorageDefault

* added configurable maxConcurrentReconciles

* fix concurrent issues

* add validate as prereq for tests
2025-07-10 14:46:33 +02:00
Hussein Galal
fcc875ab85 Mirror host nodes (#389)
* mirror host nodes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add mirror host nodes feature

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add controllername to secrets/configmap syncer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* golint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* build docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* setting controller namespace env

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add a controller_namespace env to the test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add mirrorHostNodes spec to conformance tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* change the ptr int to int

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix map key name

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-07-08 14:48:24 +03:00
Enrico Candino
57263bd10e fail fast matrix (#398) 2025-07-01 11:04:56 +02:00
Enrico Candino
bf82318ad9 Add PriorityClass reconciler (virtual cluster -> host) (#377)
* added priorityclass controller

* added priorityClass controller tests

* fix for update priorityClass

* fix system skip priorityclass

* fix name
2025-07-01 11:03:14 +02:00
jpgouin
1ca86d09d1 add troubleshoot how to guide (#390)
* add troubleshoot how to guide

Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-06-30 16:54:13 +02:00
Enrico Candino
584bae8974 bump charts (#403) 2025-06-30 10:43:53 +02:00
Hussein Galal
5a24c4edf7 bump charts to 0.3.3-r6 (#401)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-27 17:02:23 +03:00
Hussein Galal
44aa1a22ab Add pods/attach permission to k3k-kubelet (#400)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-27 16:42:05 +03:00
Enrico Candino
2b115a0b80 Add scheduled Conformance tests for shared mode (#396)
* add conformance tests with matrix

* fix serial

* splitted conformance and sigs

* push

* sig check focus fix

* cleanup cluster

* matrix for conformance tests

* removed push
2025-06-26 15:55:08 +02:00
Enrico Candino
8eb5c49ce4 bump chart (#395) 2025-06-25 10:48:52 +02:00
Enrico Candino
54ae8d2126 add named controller (#394) 2025-06-24 23:56:14 +02:00
Hussein Galal
3a101dccfd bump charts to 0.3.3-r4 (#393)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-24 00:11:17 +03:00
Hussein Galal
b81073619a Generate kubeconfig secret (#392)
* Generate kubeconfig secret

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-23 14:31:36 +03:00
Enrico Candino
f5d2e981ab Bump Charts to 0.3.3-r3 (#391)
* bump charts to 0.3.3-r2

* bump charts to 0.3.3-r3
2025-06-20 18:19:10 +02:00
jpgouin
541f506d9d [CLI] add storage-request-size flag (#372)
[CLI] add storage-request-size flag
2025-06-20 17:13:47 +02:00
Enrico Candino
f389a4e2be Fix Network Policy reconciliation (#388)
* logs

* fix delete cleanup

* update spec

* added policyName to status, skip netpol for policy managed clusters
2025-06-20 16:10:47 +02:00
jpgouin
818328c9d4 fix-howto usage of serverEnvs and agentEnvs (#385) 2025-06-20 12:32:43 +02:00
Enrico Candino
0c4752039d Fix for k3kcli policy delete (#386)
* fix for delete policy

* fix docs
2025-06-20 12:08:51 +02:00
Enrico Candino
eca219cb48 kubelet controllers cleanup (#376) 2025-06-20 12:08:28 +02:00
Hussein Galal
d1f88c32b3 Ephemeral containers fix (#371)
* Update virtual kubelet and k8s to 1.31.4

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix ephemeral containers in provider

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix linters

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-06-20 12:52:45 +03:00
Enrico Candino
b8f0e77a71 fix for empty namespace (#375) 2025-06-19 14:28:27 +02:00
jpgouin
08ba3944e0 [DOC] add how-to create virtual clusters (#373)
* [DOC] add how-to create virtual clusters
2025-06-06 12:39:51 +02:00
Enrico Candino
09e8a180de charts-0.3.3-r1 (#370) 2025-06-04 09:50:48 +02:00
jpgouin
87032c8195 [DOC] add how to choose mode (#369)
*[DOC] add how to choose between `shared` and `virtual` mode doc
2025-06-03 11:12:12 +02:00
jpgouin
78e0c307b8 add workload exposition howto doc (#366)
* [DOC] add how to expose workloads outside the virtual cluster
2025-06-03 09:14:20 +02:00
Enrico Candino
5758b880a5 Added k3kcli cluster list and k3kcli policy list commands (#368)
* list commands

* go mod tidy

* moved logic to separate file, small refactor
2025-05-30 15:35:31 +02:00
Enrico Candino
2655d792cc Update allowedModeTypes field to allowedMode (#367)
* change allowedModeTypse to allowedMode

* added shortname "vcp" and additional mode column
2025-05-29 14:53:58 +02:00
Enrico Candino
93e1c85468 VirtualClusterPolicy documentation (#364)
* initial docs

* update for change/cleanup

* update crd docs config

* updated links

* crd docs updates
2025-05-29 11:18:26 +02:00
Enrico Candino
8fbe4b93e8 Change VirtualClusterPolicy scope to Cluster (#358)
* rename clusterset to policy

* fixes

* rename clusterset to policy

* wip

* go mod

* cluster scoped

* gomod

* gomod

* fix lint

* wip

* moved logic to vcp controller

* update for clusters

* small fixes

* update cli

* fix docs, updated spec

* fix cleanup

* added missing owns for limitranges
2025-05-29 10:45:48 +02:00
jpgouin
2515d19187 move howtos in docs folder (#362) 2025-05-27 14:11:13 +02:00
jpgouin
2b1448ffb8 add air-gap support (#359)
* add airgap support
* add airgap howto guide
2025-05-27 10:13:07 +02:00
Enrico Candino
fdb5bb9c19 update version in readme (#357) 2025-05-19 20:27:23 +02:00
Hussein Galal
45fdbf9363 Fix DNS options and allow custom dnsConfig (#354)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-05-15 15:58:36 +03:00
Enrico Candino
3590b48d91 remove --devel reference (#353) 2025-05-15 12:07:08 +02:00
Enrico Candino
cca3d0c309 Rename ClusterSet to VirtualClusterPolicy (#349)
* rename clusterset to policy

* fixes
2025-05-15 12:04:47 +02:00
Hussein Galal
f228c4536c Fix annotation update (#335)
* Fix annotation update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix annotation update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix annotation update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-05-12 13:02:05 +03:00
Hussein Galal
37fe4493e7 Fix HA init server scaling (#333)
* Fix HA init server scaling

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* increase timeout in e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-05-12 11:51:35 +03:00
Enrico Candino
6a22f6f704 fix build-crds, bump Go to 1.24.2, bump golangci-lint (#344) 2025-05-06 17:24:35 +02:00
Enrico Candino
96a4341dfb Services updates (LoadBalancerConfig and NodePortConfig) (#329)
* updates to services

- added loadBalancerConfig
- removed service-port
- added logic to not expose services

* Refactor cluster tests to improve readability and maintainability

- Simplified service port expectations by directly accessing elements instead of using `ContainElement`.
- Enhanced clarity of test assertions for `k3s-server-port` and `k3s-etcd-port` attributes.
- Removed redundant code for checking service ports.

* fix ports for ingress expose, update kubeconfig generate
2025-04-22 11:52:18 +02:00
Hussein Galal
510ab4bb8a Add extra env for servers/agents (#324)
* Add extra env for servers/agents

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* cli docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix container env

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-04-21 12:25:51 +02:00
Hussein Galal
9d96ee1e9c Display error if agents flag is provided in shared mode (#330)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-04-18 17:48:43 +02:00
Enrico Candino
7c424821ca Update Chart.yaml (#332) 2025-04-18 12:43:14 +02:00
jpgouin
a2f5fd7592 Merge pull request #328 from takushi-35/ehemeral-docs
[DOC] fix: Incorrect creation method when using Ephemeral Storage in advanced-usage.md
2025-04-11 14:27:40 +02:00
takushi-35
c8df86b83b fix: Correcting incorrect procedures in advanced-usage.md 2025-04-10 13:27:00 +09:00
Hussein Galal
d41d2b8c31 Fix update bug in ensureObject (#325)
* Fix update bug in ensureObjects

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix log msg

Co-authored-by: Enrico Candino <enrico.candino@gmail.com>

* Fix import

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-04-09 17:25:48 +02:00
Enrico Candino
7cb2399b89 Update and fix to k3kcli for new ClusterSet integration (#321)
* added clusterset flag to cluster creation and displayname to clusterset creation

* updated cli docs
2025-04-04 13:22:55 +02:00
Enrico Candino
90568f24b1 Added ClusterSet as singleton (#316)
* added ClusterSet as singleton

* fix tests
2025-04-03 16:26:25 +02:00
Hussein Galal
0843a9e313 Initial support for ResourceQuotas in clustersets (#308)
* Add ResourceQuota to clusterset

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Generate docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add a defualt limitRange for ClusterSets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix linting

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add test for clusterset limitRange

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add server and worker limits

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add default limits and fixes to resourcesquota

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make build-crds

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make build-crds

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make spec as pointer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* delete default limit

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* return on delete in limitrange

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-04-03 12:30:48 +02:00
Enrico Candino
b58578788c Add clusterset commands (#319)
* added clusterset create command, small refactor with appcontext

* added clusterset delete

* updated docs
2025-04-03 11:07:32 +02:00
Enrico Candino
c4cc1e69cd requeue if server not ready (#318) 2025-04-03 10:45:18 +02:00
Enrico Candino
bd947c0fcb Create dedicated namespace for new clusters (#314)
* create dedicated namespace for new clusters

* porcelain test

* use --exit-code instead of test and shell for escaping issue

* update go.mod
2025-03-26 14:53:41 +01:00
Hussein Galal
b0b61f8d8e Fix delete cli (#281)
* Fix delete cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix delete cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* check if object has a controller reference before removing

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* move the update to the if condition

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* move the update to the if condition

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-03-24 13:44:00 +02:00
Enrico Candino
3281d54c6c fix typo (#300) 2025-03-24 10:48:42 +01:00
Hussein Galal
853b0a7e05 Chart update for 0.3.1 (#309)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-03-21 01:59:53 +02:00
143 changed files with 9354 additions and 4852 deletions

@@ -1 +1,3 @@
release-name-template: chart-{{ .Version }}
make-release-latest: false
skip-existing: true

@@ -33,5 +33,5 @@ jobs:
args: --clean --snapshot
env:
REPO: ${{ github.repository }}
REGISTRY:
REGISTRY: ""

@@ -2,9 +2,6 @@ name: Chart
on:
workflow_dispatch:
push:
tags:
- "chart-*"
permissions:
contents: write
@@ -18,15 +15,6 @@ jobs:
with:
fetch-depth: 0
- name: Check tag
if: github.event_name == 'push'
run: |
pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//")
chart_tag=$(yq .version charts/k3k/Chart.yaml)
echo pushed_tag=${pushed_tag} chart_tag=${chart_tag}
[ "${pushed_tag}" == "${chart_tag}" ]
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"

.github/workflows/test-conformance.yaml (new file, +302 lines)

@@ -0,0 +1,302 @@
name: Conformance Tests
on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
inputs:
test:
description: "Run specific test"
type: choice
options:
- conformance
- sig-api-machinery
- sig-apps
- sig-architecture
- sig-auth
- sig-cli
- sig-instrumentation
- sig-network
- sig-node
- sig-scheduling
- sig-storage
permissions:
contents: read
jobs:
conformance:
runs-on: ubuntu-latest
if: inputs.test == '' || inputs.test == 'conformance'
strategy:
fail-fast: false
matrix:
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
REPO_NAME: k3k-registry
REPO_PORT: 12345
run: |
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 3 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
make build
make package
make push
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
VERSION=$(make version)
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
run: |
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
nodePort:
serverPort: 30001
EOF
echo "Wait for bootstrap secret to be available"
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
sigs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
tests:
- name: sig-api-machinery
focus: '\[sig-api-machinery\].*\[Conformance\]'
- name: sig-apps
focus: '\[sig-apps\].*\[Conformance\]'
- name: sig-architecture
focus: '\[sig-architecture\].*\[Conformance\]'
- name: sig-auth
focus: '\[sig-auth\].*\[Conformance\]'
- name: sig-cli
focus: '\[sig-cli\].*\[Conformance\]'
- name: sig-instrumentation
focus: '\[sig-instrumentation\].*\[Conformance\]'
- name: sig-network
focus: '\[sig-network\].*\[Conformance\]'
- name: sig-node
focus: '\[sig-node\].*\[Conformance\]'
- name: sig-scheduling
focus: '\[sig-scheduling\].*\[Conformance\]'
- name: sig-storage
focus: '\[sig-storage\].*\[Conformance\]'
steps:
- name: Validate input and fail fast
if: inputs.test != '' && inputs.test != matrix.tests.name
run: |
echo "Failing this job as it's not the intended target."
exit 1
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
REPO_NAME: k3k-registry
REPO_PORT: 12345
run: |
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 3 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
make build
make package
make push
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
VERSION=$(make version)
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
run: |
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
nodePort:
serverPort: 30001
EOF
echo "Wait for bootstrap secret to be available"
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run sigs tests
run: |
FOCUS="${{ matrix.tests.focus }}"
echo "Running with --focus=${FOCUS}"
hydrophone --focus "${FOCUS}" \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: ${{ matrix.tests.name }}-logs
path: /tmp/e2e.log

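The new workflow above stands up a k3d host cluster, installs K3k, creates a virtual cluster, and runs hydrophone against its kubeconfig. A condensed sketch for reproducing a single SIG run locally, assuming an existing virtual cluster named `mycluster` in namespace `k3k-mycluster` (names and focus pattern are illustrative):

```bash
# Run one SIG conformance suite against an existing K3k virtual cluster.
go install sigs.k8s.io/hydrophone@latest
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=$PWD/k3k-mycluster-mycluster-kubeconfig.yaml
hydrophone --focus='\[sig-apps\].*\[Conformance\]' \
  --kubeconfig "$KUBECONFIG" \
  --output-dir /tmp
```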
@@ -11,7 +11,7 @@ permissions:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -21,12 +21,12 @@ jobs:
go-version-file: go.mod
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
with:
args: --timeout=5m
version: v1.60
version: v2.3.0
tests:
validate:
runs-on: ubuntu-latest
steps:
@@ -36,15 +36,35 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
tests:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Run unit tests
run: make test-unit
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: unit
tests-e2e:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
@@ -56,13 +76,10 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Build and package
run: |
make build
@@ -77,16 +94,86 @@ jobs:
- name: Run e2e tests
run: make test-e2e
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3s-logs
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3k-logs
name: e2e-k3k-logs
path: /tmp/k3k.log
tests-cli:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
- name: Build and package
run: |
make build
make package
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Check k3kcli
run: k3kcli -v
- name: Run cli tests
run: make test-cli
- name: Convert coverage data
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{ github.workspace }}/covdata/cover.out
flags: cli
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: cli-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: cli-k3k-logs
path: /tmp/k3k.log

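The tests-cli job above builds coverage-instrumented binaries via `COVERAGE=true`/`GOCOVERDIR` and converts the binary profiles with `go tool covdata`. A minimal local sketch of the same flow, using the Makefile targets shown further down (paths illustrative):

```bash
# Collect k3kcli coverage locally, mirroring the tests-cli job above.
mkdir -p covdata
export COVERAGE=true GOCOVERDIR=$PWD/covdata
make build && make package
export PATH=$PWD/bin:$PATH      # make the freshly built k3kcli available
make test-cli                   # cli tests; instrumented binaries write into $GOCOVERDIR
go tool covdata textfmt -i="$GOCOVERDIR" -o "$GOCOVERDIR/cover.out"
```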
.gitignore (+3 lines)

@@ -8,3 +8,6 @@
__debug*
*-kubeconfig.yaml
.envtest
cover.out
covcounters.**
covmeta.**

@@ -1,13 +1,27 @@
version: "2"
linters:
enable:
# default linters
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
- misspell
- wsl_v5
# extra
- misspell
- wsl
formatters:
enable:
- gci
- gofmt
- gofumpt
settings:
gci:
# The default order is `standard > default > custom > blank > dot > alias > localmodule`.
custom-order: true
sections:
- standard
- default
- alias
- localmodule
- dot
- blank
gofmt:
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'

@@ -1,18 +1,18 @@
REPO ?= rancher
COVERAGE ?= false
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
## Dependencies
GOLANGCI_LINT_VERSION := v1.63.4
CONTROLLER_TOOLS_VERSION ?= v0.14.0
GOLANGCI_LINT_VERSION := v2.3.0
GINKGO_VERSION ?= v2.21.0
ENVTEST_VERSION ?= latest
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
CONTROLLER_GEN ?= go run sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
@@ -22,7 +22,7 @@ export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin
.PHONY: all
all: version build-crds build package ## Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
all: version generate build package ## Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'
.PHONY: version
version: ## Print the current version
@@ -30,7 +30,7 @@ version: ## Print the current version
.PHONY: build
build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
@VERSION=$(VERSION) ./scripts/build
@VERSION=$(VERSION) COVERAGE=$(COVERAGE) ./scripts/build
.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images
@@ -51,47 +51,56 @@ push-%:
docker push $(REPO)/$*:latest
docker push $(REPO)/$*:dev
.PHONY: test
test: ## Run all the tests
$(GINKGO) -v -r --label-filter=$(label-filter)
$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)
.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
$(GINKGO) -v -r --skip-file=tests/*
$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*
.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) -v -r pkg/controller
$(GINKGO) $(GINKGO_FLAGS) pkg/controller
.PHONY: test-kubelet-controller
test-kubelet-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) -v -r tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests
.PHONY: build-crds
build-crds: ## Build the CRDs specs
@# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
@# allowDangerousTypes is needed for struct that use floats
$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
paths=./pkg/apis/... \
output:crd:dir=./charts/k3k/crds
.PHONY: test-cli
test-cli: ## Run the cli tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
.PHONY: generate
generate: ## Generate the CRDs specs
go generate ./...
.PHONY: docs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml --renderer=markdown --source-path=./pkg/apis/k3k.io/v1alpha1 --output-path=./docs/crds/crd-docs.md
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1alpha1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
.PHONY: lint
lint: ## Find any linting issues in the project
$(GOLANGCI_LINT) run --timeout=5m
.PHONY: fmt
fmt: ## Find any linting issues in the project
$(GOLANGCI_LINT) fmt ./...
.PHONY: validate
validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
$(GINKGO) unfocus
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(shell git status --porcelain)"
git status --porcelain
git --no-pager diff --exit-code
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
@@ -104,4 +113,4 @@ install: ## Install K3k with Helm on the targeted Kubernetes cluster
.PHONY: help
help: ## Show this help.
@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'

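For reference, the targets added or changed in this Makefile hunk are typically driven as follows (a sketch; the authoritative behavior is the recipes above):

```bash
make fmt                     # golangci-lint fmt ./... (gci ordering, interface{} -> any rewrite, gofumpt)
make lint                    # golangci-lint run --timeout=5m with the v2.3.0 config
make generate                # go generate ./... (replaces the old build-crds invocation)
make validate                # generate + docs + fmt, then fails if the working tree is dirty
make test-unit               # ginkgo with the coverage flags, skipping tests/*
make test-kubelet-controller # ginkgo for k3k-kubelet/controller
make test-cli                # ginkgo --label-filter=cli --flake-attempts=3 tests
COVERAGE=true make build     # build coverage-instrumented binaries via scripts/build
```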
@@ -56,10 +56,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
2. Install the K3k controller:
```bash
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
```
**NOTE:** K3k is currently under development, so the chart is marked as a development chart. This means you need to add the `--devel` flag to install it. For production use, keep an eye on releases for stable versions. We recommend using the latest released version when possible.
**NOTE:** K3k is currently under development. We recommend using the latest released version when possible.
### Install the `k3kcli`
@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.3.0
k3kcli version v0.3.4
```

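After following the updated install steps, a quick sanity check (the wait selector and namespace are taken from the CI workflows earlier in this diff):

```bash
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
k3kcli --version
```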
@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.1-r1
appVersion: v0.3.1-rc1
version: 0.3.4
appVersion: v0.3.4

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.16.0
name: clusters.k3k.io
spec:
group: k3k.io
@@ -14,7 +14,17 @@ spec:
singular: cluster
scope: Namespaced
versions:
- name: v1alpha1
- additionalPrinterColumns:
- jsonPath: .spec.mode
name: Mode
type: string
- jsonPath: .status.phase
name: Status
type: string
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: |-
@@ -65,6 +75,125 @@ spec:
items:
type: string
type: array
agentEnvs:
description: AgentEnvs specifies list of environment variables to
set in the agent pod.
items:
description: EnvVar represents an environment variable present in
a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
type: string
value:
description: |-
Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in the container and
any service environment variables. If a variable cannot be resolved,
the reference in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether the variable
exists or not.
Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value. Cannot
be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: |-
Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
properties:
apiVersion:
description: Version of the schema the FieldPath is
written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the specified
API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the exposed
resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
agents:
default: 0
description: |-
@@ -94,27 +223,87 @@ spec:
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
clusterLimit:
description: Limit defines resource limits for server/agent nodes.
customCAs:
description: CustomCAs specifies the cert/key pairs for custom CA
certificates.
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit specifies resource limits for server
nodes.
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit specifies resource limits for agent nodes.
enabled:
description: Enabled toggles this feature on or off.
type: boolean
sources:
description: Sources defines the sources for all required custom
CA certificates.
properties:
clientCA:
description: ClientCA specifies the client-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
etcdPeerCA:
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
etcdServerCA:
description: ETCDServerCA specifies the etcd-server-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
requestHeaderCA:
description: RequestHeaderCA specifies the request-header-ca
cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
serverCA:
description: ServerCA specifies the server-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
serviceAccountToken:
description: ServiceAccountToken specifies the service-account-token
key.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
type: object
type: object
expose:
@@ -140,6 +329,21 @@ spec:
loadbalancer:
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
properties:
etcdPort:
description: |-
ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.
If not specified, the default etcd 2379 port will be allocated.
If 0 or negative, the port will not be exposed.
format: int32
type: integer
serverPort:
description: |-
ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.
If not specified, the default https 443 port will be allocated.
If 0 or negative, the port will not be exposed.
format: int32
type: integer
type: object
nodePort:
description: NodePort specifies options for exposing the API server
@@ -148,23 +352,24 @@ spec:
etcdPort:
description: |-
ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767).
If not specified, a random port between 30000-32767 will be allocated.
If out of range, the port will not be exposed.
format: int32
type: integer
serverPort:
description: |-
ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
servicePort:
description: |-
ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767).
ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.
If not specified, a random port between 30000-32767 will be allocated.
If out of range, the port will not be exposed.
format: int32
type: integer
type: object
type: object
mirrorHostNodes:
description: |-
MirrorHostNodes controls whether node objects from the host cluster
are mirrored into the virtual cluster.
type: boolean
mode:
allOf:
- enum:
@@ -189,8 +394,6 @@ spec:
In "shared" mode, this also applies to workloads.
type: object
persistence:
default:
type: dynamic
description: |-
Persistence specifies options for persisting etcd data.
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
@@ -202,6 +405,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 1G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
@@ -210,8 +414,6 @@ spec:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
priorityClass:
description: |-
@@ -225,6 +427,134 @@ spec:
items:
type: string
type: array
serverEnvs:
description: ServerEnvs specifies list of environment variables to
set in the server pod.
items:
description: EnvVar represents an environment variable present in
a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
type: string
value:
description: |-
Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in the container and
any service environment variables. If a variable cannot be resolved,
the reference in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether the variable
exists or not.
Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value. Cannot
be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: |-
Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
properties:
apiVersion:
description: Version of the schema the FieldPath is
written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the specified
API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the exposed
resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
default: ""
description: |-
Name of the referent.
This field is effectively required, but due to backwards compatibility is
allowed to be empty. Instances of this type with an empty value here are
almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit specifies resource limits for server nodes.
type: object
servers:
default: 1
description: |-
@@ -271,8 +601,18 @@ spec:
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
If not specified, the Kubernetes version of the host node will be used.
type: string
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit specifies resource limits for agent nodes.
type: object
type: object
status:
default: {}
description: Status reflects the observed state of the Cluster.
properties:
clusterCIDR:
@@ -281,29 +621,87 @@ spec:
clusterDNS:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
conditions:
description: Conditions are the individual conditions for the cluster
set.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
hostVersion:
description: HostVersion is the Kubernetes version of the host node.
type: string
persistence:
description: Persistence specifies options for persisting etcd data.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
kubeletPort:
description: KubeletPort specifies the port used by k3k-kubelet in
shared mode.
type: integer
phase:
default: Unknown
description: Phase is a high-level summary of the cluster's current
lifecycle state.
enum:
- Pending
- Provisioning
- Ready
- Failed
- Terminating
- Unknown
type: string
policyName:
description: PolicyName specifies the virtual cluster policy name
bound to the virtual cluster.
type: string
serviceCIDR:
description: ServiceCIDR is the CIDR range for service IPs.
type: string
@@ -313,6 +711,10 @@ spec:
items:
type: string
type: array
webhookPort:
description: WebhookPort specifies the port used by webhook in k3k-kubelet
in shared mode.
type: integer
type: object
type: object
served: true

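Taken together, the schema additions above (serverEnvs/agentEnvs, customCAs, the expose loadbalancer/nodePort ports, mirrorHostNodes, serverLimit/workerLimit, and the defaulted persistence block) can be exercised with a manifest along these lines; every value, the namespace, and the secret name are illustrative, and the apply pattern mirrors the conformance workflow:

```bash
# Illustrative Cluster manifest touching several of the new/changed fields
# (all values, the namespace, and the "my-server-ca" secret are examples).
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  mode: shared
  servers: 1
  mirrorHostNodes: true
  serverEnvs:
    - name: K3S_DEBUG            # plain EnvVar, per the serverEnvs schema
      value: "true"
  serverLimit:
    cpu: "1"
    memory: 1Gi
  persistence:
    type: dynamic
    storageRequestSize: 2G       # defaults to 1G when omitted
  expose:
    nodePort:
      serverPort: 30001          # must fall in 30000-32767 or it is not exposed
  customCAs:
    enabled: true
    sources:
      serverCA:
        secretName: my-server-ca # secret is expected to contain tls.crt and tls.key
EOF
```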
@@ -1,212 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: clustersets.k3k.io
spec:
group: k3k.io
names:
kind: ClusterSet
listKind: ClusterSetList
plural: clustersets
singular: clusterset
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
It allows defining common configurations and constraints for the clusters within the set.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
default: {}
description: Spec defines the desired state of the ClusterSet.
properties:
allowedNodeTypes:
default:
- shared
description: AllowedNodeTypes specifies the allowed cluster provisioning
modes. Defaults to [shared].
items:
description: ClusterMode is the possible provisioning mode of a
Cluster.
enum:
- shared
- virtual
type: string
minItems: 1
type: array
x-kubernetes-validations:
- message: mode is immutable
rule: self == oldSelf
defaultLimits:
description: DefaultLimits specifies the default resource limits for
servers/agents when a cluster in the set doesn't provide any.
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit specifies resource limits for server
nodes.
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit specifies resource limits for agent nodes.
type: object
type: object
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector specifies the node selector that
applies to all clusters (server + agent) in the set.
type: object
defaultPriorityClass:
description: DefaultPriorityClass specifies the priorityClassName
applied to all pods of all clusters in the set.
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy indicates whether to disable the
creation of a default network policy for cluster isolation.
type: boolean
maxLimits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimits specifies the maximum resource limits that
apply to all clusters (server + agent) in the set.
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel specifies the pod security
admission level applied to the pods in the namespace.
enum:
- privileged
- baseline
- restricted
type: string
type: object
status:
description: Status reflects the observed state of the ClusterSet.
properties:
conditions:
description: Conditions are the individual conditions for the cluster
set.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
direct use as an array at the field path .status.conditions. For
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
observations of a foo's current state.\n\t // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
\ // other fields\n\t}"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
---
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
useful (see .node.status.conditions), the ability to deconflict is important.
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated.
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
status was updated.
format: int64
type: integer
summary:
description: Summary is a summary of the status.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}


@@ -0,0 +1,310 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.0
name: virtualclusterpolicies.k3k.io
spec:
group: k3k.io
names:
kind: VirtualClusterPolicy
listKind: VirtualClusterPolicyList
plural: virtualclusterpolicies
shortNames:
- vcp
singular: virtualclusterpolicy
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.allowedMode
name: Mode
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: |-
VirtualClusterPolicy allows defining common configurations and constraints
for clusters within a clusterpolicy.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
default: {}
description: Spec defines the desired state of the VirtualClusterPolicy.
properties:
allowedMode:
default: shared
description: AllowedMode specifies the allowed cluster provisioning
mode. Defaults to "shared".
enum:
- shared
- virtual
type: string
x-kubernetes-validations:
- message: mode is immutable
rule: self == oldSelf
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector specifies the node selector that
applies to all clusters (server + agent) in the target Namespace.
type: object
defaultPriorityClass:
description: DefaultPriorityClass specifies the priorityClassName
applied to all pods of all clusters in the target Namespace.
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy indicates whether to disable the
creation of a default network policy for cluster isolation.
type: boolean
limit:
description: |-
Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
to set defaults and constraints (min/max)
properties:
limits:
description: Limits is the list of LimitRangeItem objects that
are enforced.
items:
description: LimitRangeItem defines a min/max usage limit for
any resource that matches on kind.
properties:
default:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Default resource requirement limit value by
resource name if resource limit is omitted.
type: object
defaultRequest:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: DefaultRequest is the default resource requirement
request value by resource name if resource request is
omitted.
type: object
max:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Max usage constraints on this kind by resource
name.
type: object
maxLimitRequestRatio:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimitRequestRatio if specified, the named
resource must have a request and limit that are both non-zero
where limit divided by request is less than or equal to
the enumerated value; this represents the max burst for
the named resource.
type: object
min:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Min usage constraints on this kind by resource
name.
type: object
type:
description: Type of resource that this limit applies to.
type: string
required:
- type
type: object
type: array
x-kubernetes-list-type: atomic
required:
- limits
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel specifies the pod security
admission level applied to the pods in the namespace.
enum:
- privileged
- baseline
- restricted
type: string
quota:
description: Quota specifies the resource limits for clusters within
a clusterpolicy.
properties:
hard:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
hard is the set of desired hard limits for each named resource.
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
type: object
scopeSelector:
description: |-
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
but expressed using ScopeSelectorOperator in combination with possible values.
For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
properties:
matchExpressions:
description: A list of scope selector requirements by scope
of the resources.
items:
description: |-
A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
that relates the scope name and values.
properties:
operator:
description: |-
Represents a scope's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist.
type: string
scopeName:
description: The name of the scope that the selector
applies to.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- operator
- scopeName
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
scopes:
description: |-
A collection of filters that must match each object tracked by a quota.
If not specified, the quota matches all objects.
items:
description: A ResourceQuotaScope defines a filter that must
match each object tracked by a quota
type: string
type: array
x-kubernetes-list-type: atomic
type: object
type: object
status:
description: Status reflects the observed state of the VirtualClusterPolicy.
properties:
conditions:
description: Conditions are the individual conditions for the policy.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated.
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
status was updated.
format: int64
type: integer
summary:
description: Summary is a summary of the status.
type: string
type: object
required:
- metadata
- spec
type: object
served: true
storage: true
subresources:
status: {}
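The schema above translates directly into a manifest. A hedged sketch of a VirtualClusterPolicy that uses a few of the fields defined in this CRD (the policy name and all quota values are placeholders):

```bash
kubectl apply -f - <<'EOF'
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: example-policy
spec:
  allowedMode: shared
  podSecurityAdmissionLevel: baseline
  quota:
    hard:
      cpu: "8"
      memory: 16Gi
      pods: "50"
EOF
```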


@@ -26,6 +26,21 @@ spec:
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
- name: K3S_IMAGE
value: {{ .Values.k3sServer.image.repository }}
- name: K3S_IMAGE_PULL_POLICY
value: {{ .Values.k3sServer.image.pullPolicy }}
- name: KUBELET_PORT_RANGE
value: {{ .Values.sharedAgent.kubeletPortRange }}
- name: WEBHOOK_PORT_RANGE
value: {{ .Values.sharedAgent.webhookPortRange }}
- name: CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- containerPort: 8080
name: https


@@ -16,7 +16,7 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
rules:
- apiGroups:
- ""
@@ -30,8 +30,29 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
roleRef:
kind: ClusterRole
name: {{ include "k3k.fullname" . }}-node-proxy
name: k3k-kubelet-node
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: k3k-priorityclass
rules:
- apiGroups:
- "scheduling.k8s.io"
resources:
- "priorityclasses"
verbs:
- "*"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: k3k-priorityclass
roleRef:
kind: ClusterRole
name: k3k-priorityclass
apiGroup: rbac.authorization.k8s.io


@@ -9,8 +9,21 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
# extraEnv:
# - name: MY_CUSTOM_VAR
# value: "my_custom_value"
# - name: ANOTHER_VAR
# valueFrom:
# secretKeyRef:
# name: my-secret
# key: my-key
extraEnv: []
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy for clustersets, if not set
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
@@ -23,7 +36,16 @@ serviceAccount:
# configuration related to the shared agent mode in k3k
sharedAgent:
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
image:
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# image registry configuration related to the k3s server
k3sServer:
image:
repository: "rancher/k3s"
pullPolicy: ""
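The same values can be supplied at install time. A sketch only, assuming the chart is installed from a local `./charts/k3k` path under the release name `k3k` and that `IfNotPresent` is an acceptable pull policy (all of these are placeholders):

```bash
helm upgrade --install k3k ./charts/k3k \
  --set sharedAgent.kubeletPortRange="50000-51000" \
  --set sharedAgent.webhookPortRange="51001-52000" \
  --set k3sServer.image.repository="rancher/k3s" \
  --set k3sServer.image.pullPolicy="IfNotPresent"
```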


@@ -1,16 +1,20 @@
package cmds
import (
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
)
func NewClusterCommand() *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: []*cli.Command{
NewClusterCreateCmd(),
NewClusterDeleteCmd(),
},
func NewClusterCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "cluster",
Short: "cluster command",
}
cmd.AddCommand(
NewClusterCreateCmd(appCtx),
NewClusterDeleteCmd(appCtx),
NewClusterListCmd(appCtx),
)
return cmd
}


@@ -3,26 +3,28 @@ package cmds
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
)
type CreateConfig struct {
@@ -31,80 +33,93 @@ type CreateConfig struct {
serviceCIDR string
servers int
agents int
serverArgs cli.StringSlice
agentArgs cli.StringSlice
serverArgs []string
agentArgs []string
serverEnvs []string
agentEnvs []string
persistenceType string
storageClassName string
storageRequestSize string
version string
mode string
kubeconfigServerHost string
policy string
mirrorHostNodes bool
customCertsPath string
}
func NewClusterCreateCmd() *cli.Command {
func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
createConfig := &CreateConfig{}
createFlags := NewCreateFlags(createConfig)
return &cli.Command{
Name: "create",
Usage: "Create new cluster",
UsageText: "k3kcli cluster create [command options] NAME",
Action: createAction(createConfig),
Flags: append(CommonFlags, createFlags...),
HideHelpCommand: true,
cmd := &cobra.Command{
Use: "create",
Short: "Create new cluster",
Example: "k3kcli cluster create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
return validateCreateConfig(createConfig)
},
RunE: createAction(appCtx, createConfig),
Args: cobra.ExactArgs(1),
}
CobraFlagNamespace(appCtx, cmd.Flags())
createFlags(cmd, createConfig)
return cmd
}
func createAction(config *CreateConfig) cli.ActionFunc {
return func(clx *cli.Context) error {
func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
name := clx.Args().First()
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
restConfig, err := loadRESTConfig()
if err != nil {
return err
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
namespace := appCtx.Namespace(name)
if err := createNamespace(ctx, client, namespace, config.policy); err != nil {
return err
}
if strings.Contains(config.version, "+") {
orig := config.version
config.version = strings.Replace(config.version, "+", "-", -1)
config.version = strings.ReplaceAll(config.version, "+", "-")
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
}
if config.token != "" {
logrus.Infof("Creating cluster token secret")
logrus.Info("Creating cluster token secret")
obj := k3kcluster.TokenSecretObj(config.token, name, Namespace())
obj := k3kcluster.TokenSecretObj(config.token, name, namespace)
if err := ctrlClient.Create(ctx, &obj); err != nil {
if err := client.Create(ctx, &obj); err != nil {
return err
}
}
logrus.Infof("Creating a new cluster [%s]", name)
if config.customCertsPath != "" {
if err := CreateCustomCertsSecrets(ctx, name, namespace, config.customCertsPath, client); err != nil {
return err
}
}
cluster := newCluster(name, Namespace(), config)
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
url, err := url.Parse(restConfig.Host)
url, err := url.Parse(appCtx.RestConfig.Host)
if err != nil {
return err
}
@@ -116,7 +131,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {
cluster.Spec.TLSSANs = []string{host[0]}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if err := client.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
} else {
@@ -124,9 +139,13 @@ func createAction(config *CreateConfig) cli.ActionFunc {
}
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
logrus.Infof("Waiting for cluster to be available..")
logrus.Infof("waiting for cluster to be available..")
if err := waitForCluster(ctx, client, cluster); err != nil {
return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
// retry every 5s for at most 2m, or 25 times
availableBackoff := wait.Backoff{
@@ -140,29 +159,13 @@ func createAction(config *CreateConfig) cli.ActionFunc {
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0], 0)
return err
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
kubeconfigData, err := clientcmd.Write(*kubeconfig)
if err != nil {
return err
}
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
return writeKubeconfigFile(cluster, kubeconfig, "")
}
}
@@ -181,14 +184,18 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
Agents: ptr.To(int32(config.agents)),
ClusterCIDR: config.clusterCIDR,
ServiceCIDR: config.serviceCIDR,
ServerArgs: config.serverArgs.Value(),
AgentArgs: config.agentArgs.Value(),
ServerArgs: config.serverArgs,
AgentArgs: config.agentArgs,
ServerEnvs: env(config.serverEnvs),
AgentEnvs: env(config.agentEnvs),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
Type: v1alpha1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
MirrorHostNodes: config.mirrorHostNodes,
},
}
if config.storageClassName == "" {
@@ -202,5 +209,135 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
}
}
if config.customCertsPath != "" {
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ClientCA: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
},
ServerCA: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
},
ETCDServerCA: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
},
ETCDPeerCA: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
},
RequestHeaderCA: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
},
ServiceAccountToken: v1alpha1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
},
},
}
}
return cluster
}
func env(envSlice []string) []v1.EnvVar {
var envVars []v1.EnvVar
for _, env := range envSlice {
keyValue := strings.Split(env, "=")
if len(keyValue) != 2 {
logrus.Fatalf("incorrect value for environment variable %s", env)
}
envVars = append(envVars, v1.EnvVar{
Name: keyValue[0],
Value: keyValue[1],
})
}
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
interval := 5 * time.Second
timeout := 2 * time.Minute
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
key := client.ObjectKeyFromObject(cluster)
if err := k8sClient.Get(ctx, key, cluster); err != nil {
return false, fmt.Errorf("failed to get resource: %w", err)
}
// If resource ready -> stop polling
if cluster.Status.Phase == v1alpha1.ClusterReady {
return true, nil
}
// If resource failed -> stop polling with an error
if cluster.Status.Phase == v1alpha1.ClusterFailed {
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
}
// Condition not met, continue polling.
return false, nil
})
}
func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsPath string, k8sclient client.Client) error {
customCAsMap := map[string]string{
"etcd-peer-ca": "/etcd/peer-ca",
"etcd-server-ca": "/etcd/server-ca",
"server-ca": "/server-ca",
"client-ca": "/client-ca",
"request-header-ca": "/request-header-ca",
"service-account-token": "/service",
}
for certName, fileName := range customCAsMap {
var (
certFilePath, keyFilePath string
cert, key []byte
err error
)
if certName != "service-account-token" {
certFilePath = customCertsPath + fileName + ".crt"
cert, err = os.ReadFile(certFilePath)
if err != nil {
return err
}
}
keyFilePath = customCertsPath + fileName + ".key"
key, err = os.ReadFile(keyFilePath)
if err != nil {
return err
}
certSecret := caCertSecret(certName, name, namespace, cert, key)
if err := k8sclient.Create(ctx, certSecret); err != nil {
return client.IgnoreAlreadyExists(err)
}
}
return nil
}
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
Namespace: clusterNamespace,
},
Type: v1.SecretTypeTLS,
Data: map[string][]byte{
v1.TLSCertKey: cert,
v1.TLSPrivateKeyKey: key,
},
}
}
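Based on the file paths read by `CreateCustomCertsSecrets`, the directory passed to `--custom-certs` is expected to look roughly like the sketch below; the `./my-certs` path and the cluster name are placeholders:

```bash
# my-certs/client-ca.crt            my-certs/client-ca.key
# my-certs/server-ca.crt            my-certs/server-ca.key
# my-certs/request-header-ca.crt    my-certs/request-header-ca.key
# my-certs/etcd/peer-ca.crt         my-certs/etcd/peer-ca.key
# my-certs/etcd/server-ca.crt       my-certs/etcd/server-ca.key
# my-certs/service.key              # service-account signing key; no matching .crt is read
k3kcli cluster create --custom-certs ./my-certs my-cluster
```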


@@ -3,96 +3,59 @@ package cmds
import (
"errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/urfave/cli/v2"
)
func NewCreateFlags(config *CreateConfig) []cli.Flag {
return []cli.Flag{
&cli.IntFlag{
Name: "servers",
Usage: "number of servers",
Destination: &config.servers,
Value: 1,
Action: func(ctx *cli.Context, value int) error {
if value <= 0 {
return errors.New("invalid number of servers")
}
return nil
},
},
&cli.IntFlag{
Name: "agents",
Usage: "number of agents",
Destination: &config.agents,
},
&cli.StringFlag{
Name: "token",
Usage: "token of the cluster",
Destination: &config.token,
},
&cli.StringFlag{
Name: "cluster-cidr",
Usage: "cluster CIDR",
Destination: &config.clusterCIDR,
},
&cli.StringFlag{
Name: "service-cidr",
Usage: "service CIDR",
Destination: &config.serviceCIDR,
},
&cli.StringFlag{
Name: "persistence-type",
Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
Value: string(v1alpha1.DynamicPersistenceMode),
Destination: &config.persistenceType,
Action: func(ctx *cli.Context, value string) error {
switch v1alpha1.PersistenceMode(value) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
},
},
&cli.StringFlag{
Name: "storage-class-name",
Usage: "storage class name for dynamic persistence type",
Destination: &config.storageClassName,
},
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Destination: &config.serverArgs,
},
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Destination: &config.agentArgs,
},
&cli.StringFlag{
Name: "version",
Usage: "k3s version",
Destination: &config.version,
},
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type (shared, virtual)",
Destination: &config.mode,
Value: "shared",
Action: func(ctx *cli.Context, value string) error {
switch value {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
},
},
&cli.StringFlag{
Name: "kubeconfig-server",
Usage: "override the kubeconfig server host",
Destination: &config.kubeconfigServerHost,
},
}
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().IntVar(&cfg.servers, "servers", 1, "number of servers")
cmd.Flags().IntVar(&cfg.agents, "agents", 0, "number of agents")
cmd.Flags().StringVar(&cfg.token, "token", "", "token of the cluster")
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments")
cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs")
cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs")
cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)")
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
}
func validateCreateConfig(cfg *CreateConfig) error {
if cfg.servers <= 0 {
return errors.New("invalid number of servers")
}
// validate each field independently; a valid value must not short-circuit the remaining checks
if cfg.persistenceType != "" {
switch v1alpha1.PersistenceMode(cfg.persistenceType) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
}
// only parse the storage size when one was provided; the empty default would always fail to parse
if cfg.storageRequestSize != "" {
if _, err := resource.ParseQuantity(cfg.storageRequestSize); err != nil {
return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
}
}
if cfg.mode != "" {
switch cfg.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
}
return nil
}
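Putting the new flag set together, two hedged invocations (the cluster names and the `local-path` storage class are placeholders; `--agents` is only accepted in virtual mode, as enforced in `createAction`):

```bash
# Shared-mode cluster with defaults
k3kcli cluster create my-shared-cluster

# Virtual-mode cluster with explicit sizing and dynamic persistence
k3kcli cluster create \
  --mode virtual \
  --servers 3 --agents 2 \
  --persistence-type dynamic \
  --storage-class-name local-path \
  --storage-request-size 10Gi \
  my-virtual-cluster
```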


@@ -4,57 +4,112 @@ import (
"context"
"errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
func NewClusterDeleteCmd() *cli.Command {
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
UsageText: "k3kcli cluster delete [command options] NAME",
Action: delete,
Flags: CommonFlags,
HideHelpCommand: true,
var keepData bool
func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "delete",
Short: "Delete an existing cluster",
Example: "k3kcli cluster delete [command options] NAME",
RunE: delete(appCtx),
Args: cobra.ExactArgs(1),
}
CobraFlagNamespace(appCtx, cmd.Flags())
cmd.Flags().BoolVar(&keepData, "keep-data", false, "keeps persistence volumes created for the cluster after deletion")
return cmd
}
func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
namespace := appCtx.Namespace(name)
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
// keep bootstrap secrets and tokens if --keep-data flag is passed
if keepData {
// skip removing tokenSecret
if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
return err
}
// skip removing webhook secret
if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
return err
}
} else {
matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
matchingLabels.ApplyToList(&listOpts)
deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
return ctrlclient.IgnoreNotFound(err)
}
}
if err := client.Delete(ctx, &cluster); err != nil {
return ctrlclient.IgnoreNotFound(err)
}
return nil
}
}
func delete(clx *cli.Context) error {
ctx := context.Background()
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
var secret v1.Secret
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
key := types.NamespacedName{
Name: name,
Namespace: cluster.Namespace,
}
name := clx.Args().First()
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if err := cl.Get(ctx, key, &secret); err != nil {
if apierrors.IsNotFound(err) {
logrus.Warnf("%s secret is not found", name)
return nil
}
restConfig, err := loadRESTConfig()
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
if controllerutil.HasControllerReference(&secret) {
if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
return err
}
return cl.Update(ctx, &secret)
}
logrus.Infof("deleting [%s] cluster", name)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: Namespace(),
},
}
return ctrlClient.Delete(ctx, &cluster)
return nil
}
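Example usage of the delete command and its new `--keep-data` flag (cluster and namespace names are placeholders):

```bash
# Remove the cluster along with the PVCs created for its servers
k3kcli cluster delete -n my-namespace my-cluster

# Remove the cluster but keep its token/webhook secrets and persistent volumes
k3kcli cluster delete --keep-data my-cluster
```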

cli/cmds/cluster_list.go (new file, 52 lines)

@@ -0,0 +1,52 @@
package cmds
import (
"context"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/printers"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "List all the existing clusters",
Example: "k3kcli cluster list [command options]",
RunE: list(appCtx),
Args: cobra.NoArgs,
}
CobraFlagNamespace(appCtx, cmd.Flags())
return cmd
}
func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
var clusters v1alpha1.ClusterList
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
return err
}
crd := &apiextensionsv1.CustomResourceDefinition{}
if err := client.Get(ctx, types.NamespacedName{Name: "clusters.k3k.io"}, crd); err != nil {
return err
}
items := toPointerSlice(clusters.Items)
table := createTable(crd, items)
printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true})
return printer.PrintObj(table, cmd.OutOrStdout())
}
}
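A quick sketch of the list command (the namespace is a placeholder); the printed columns come from the CRD's `additionalPrinterColumns`, resolved by the table printer added in `cli/cmds/table_printer.go`:

```bash
k3kcli cluster list -n my-namespace
```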


@@ -8,151 +8,129 @@ import (
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
kubeconfigServerHost string
generateKubeconfigFlags = []cli.Flag{
&cli.StringFlag{
Name: "name",
Usage: "cluster name",
Destination: &name,
},
&cli.StringFlag{
Name: "config-name",
Usage: "the name of the generated kubeconfig file",
Destination: &configName,
},
&cli.StringFlag{
Name: "cn",
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
Destination: &cn,
Value: controller.AdminCommonName,
},
&cli.StringSliceFlag{
Name: "org",
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
Value: &org,
},
&cli.StringSliceFlag{
Name: "altNames",
Usage: "altNames of the generated certificates for the kubeconfig",
Value: &altNames,
},
&cli.Int64Flag{
Name: "expiration-days",
Usage: "Expiration date of the certificates used for the kubeconfig",
Destination: &expirationDays,
Value: 356,
},
&cli.StringFlag{
Name: "kubeconfig-server",
Usage: "override the kubeconfig server host",
Destination: &kubeconfigServerHost,
Value: "",
},
}
)
var subcommands = []*cli.Command{
{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate,
Flags: append(CommonFlags, generateKubeconfigFlags...),
},
type GenerateKubeconfigConfig struct {
name string
configName string
cn string
org []string
altNames []string
expirationDays int64
kubeconfigServerHost string
}
func NewKubeconfigCommand() *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: subcommands,
func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "kubeconfig",
Short: "Manage kubeconfig for clusters",
}
cmd.AddCommand(
NewKubeconfigGenerateCmd(appCtx),
)
return cmd
}
func generate(clx *cli.Context) error {
restConfig, err := loadRESTConfig()
if err != nil {
return err
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
cfg := &GenerateKubeconfigConfig{}
cmd := &cobra.Command{
Use: "generate",
Short: "Generate kubeconfig for clusters",
RunE: generate(appCtx, cfg),
Args: cobra.NoArgs,
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
CobraFlagNamespace(appCtx, cmd.Flags())
generateKubeconfigFlags(cmd, cfg)
clusterKey := types.NamespacedName{
Name: name,
Namespace: Namespace(),
}
return cmd
}
var cluster v1alpha1.Cluster
func generateKubeconfigFlags(cmd *cobra.Command, cfg *GenerateKubeconfigConfig) {
cmd.Flags().StringVar(&cfg.name, "name", "", "cluster name")
cmd.Flags().StringVar(&cfg.configName, "config-name", "", "the name of the generated kubeconfig file")
cmd.Flags().StringVar(&cfg.cn, "cn", controller.AdminCommonName, "Common name (CN) of the generated certificates for the kubeconfig")
cmd.Flags().StringSliceVar(&cfg.org, "org", nil, "Organization name (ORG) of the generated certificates for the kubeconfig")
cmd.Flags().StringSliceVar(&cfg.altNames, "altNames", nil, "altNames of the generated certificates for the kubeconfig")
cmd.Flags().Int64Var(&cfg.expirationDays, "expiration-days", 365, "Expiration date of the certificates used for the kubeconfig")
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
}
ctx := context.Background()
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
clusterKey := types.NamespacedName{
Name: cfg.name,
Namespace: appCtx.Namespace(cfg.name),
}
host := strings.Split(url.Host, ":")
if kubeconfigServerHost != "" {
host = []string{kubeconfigServerHost}
var cluster v1alpha1.Cluster
if err := altNames.Set(kubeconfigServerHost); err != nil {
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
url, err := url.Parse(appCtx.RestConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
if cfg.kubeconfigServerHost != "" {
host = []string{cfg.kubeconfigServerHost}
cfg.altNames = append(cfg.altNames, cfg.kubeconfigServerHost)
}
certAltNames := certs.AddSANs(cfg.altNames)
if len(cfg.org) == 0 {
cfg.org = []string{user.SystemPrivilegedGroup}
}
kubeCfg := kubeconfig.KubeConfig{
CN: cfg.cn,
ORG: cfg.org,
ExpiryDate: time.Hour * 24 * time.Duration(cfg.expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = kubeCfg.Generate(ctx, client, &cluster, host[0], 0)
return err
}); err != nil {
return err
}
return writeKubeconfigFile(&cluster, kubeconfig, cfg.configName)
}
}
certAltNames := certs.AddSANs(altNames.Value())
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
return err
}); err != nil {
return err
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}
pwd, err := os.Getwd()
@@ -160,11 +138,7 @@ func generate(clx *cli.Context) error {
return err
}
if configName == "" {
configName = cluster.Name + "-kubeconfig.yaml"
}
logrus.Infof(`You can start using the cluster with:
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
@@ -175,5 +149,5 @@ func generate(clx *cli.Context) error {
return err
}
return os.WriteFile(configName, kubeconfigData, 0644)
return os.WriteFile(configName, kubeconfigData, 0o644)
}
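With the cobra flags above, generating and using a kubeconfig could look like the following sketch (the cluster name, output file, and expiry are placeholders):

```bash
k3kcli kubeconfig generate --name my-cluster --config-name my-cluster.yaml --expiration-days 30
export KUBECONFIG=$PWD/my-cluster.yaml
kubectl cluster-info
```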

cli/cmds/policy.go (new file, 20 lines)

@@ -0,0 +1,20 @@
package cmds
import (
"github.com/spf13/cobra"
)
func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "policy",
Short: "policy command",
}
cmd.AddCommand(
NewPolicyCreateCmd(appCtx),
NewPolicyDeleteCmd(appCtx),
NewPolicyListCmd(appCtx),
)
return cmd
}

cli/cmds/policy_create.go (new file, 109 lines)

@@ -0,0 +1,109 @@
package cmds
import (
"context"
"errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/policy"
)
type VirtualClusterPolicyCreateConfig struct {
mode string
}
func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
config := &VirtualClusterPolicyCreateConfig{}
cmd := &cobra.Command{
Use: "create",
Short: "Create new policy",
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
}
},
RunE: policyCreateAction(appCtx, config),
Args: cobra.ExactArgs(1),
}
cmd.Flags().StringVar(&config.mode, "mode", "shared", "The allowed mode type of the policy")
return cmd
}
func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
policyName := args[0]
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
return err
}
}
func createNamespace(ctx context.Context, client client.Client, name, policyName string) error {
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
if policyName != "" {
ns.Labels = map[string]string{
policy.PolicyNameLabelKey: policyName,
}
}
if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
logrus.Infof(`Creating namespace [%s]`, name)
if err := client.Create(ctx, ns); err != nil {
return err
}
}
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
policy := &v1alpha1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1alpha1",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
AllowedMode: mode,
},
}
if err := client.Create(ctx, policy); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, err
}
logrus.Infof("Policy [%s] already exists", policyName)
}
return policy, nil
}
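A hedged walk-through of the policy subcommands defined here and in the files that follow (the policy name is a placeholder):

```bash
k3kcli policy create --mode shared team-a-policy
k3kcli policy list
k3kcli policy delete team-a-policy
```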

cli/cmds/policy_delete.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package cmds
import (
"context"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "delete",
Short: "Delete an existing policy",
Example: "k3kcli policy delete [command options] NAME",
RunE: policyDeleteAction(appCtx),
Args: cobra.ExactArgs(1),
}
}
func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
name := args[0]
policy := &v1alpha1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {
if apierrors.IsNotFound(err) {
logrus.Warnf("Policy not found")
} else {
return err
}
}
return nil
}
}

cli/cmds/policy_list.go (new file, 47 lines)

@@ -0,0 +1,47 @@
package cmds
import (
"context"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/printers"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "list",
Short: "List all the existing policies",
Example: "k3kcli policy list [command options]",
RunE: policyList(appCtx),
Args: cobra.NoArgs,
}
}
func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
var policies v1alpha1.VirtualClusterPolicyList
if err := client.List(ctx, &policies); err != nil {
return err
}
crd := &apiextensionsv1.CustomResourceDefinition{}
if err := client.Get(ctx, types.NamespacedName{Name: "virtualclusterpolicies.k3k.io"}, crd); err != nil {
return err
}
items := toPointerSlice(policies.Items)
table := createTable(crd, items)
printer := printers.NewTablePrinter(printers.PrintOptions{})
return printer.PrintObj(table, cmd.OutOrStdout())
}
}


@@ -2,99 +2,118 @@ package cmds
import (
"fmt"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
defaultNamespace = "default"
)
type AppContext struct {
RestConfig *rest.Config
Client client.Client
var (
Scheme = runtime.NewScheme()
debug bool
// Global flags
Debug bool
Kubeconfig string
namespace string
CommonFlags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
Usage: "kubeconfig path",
Destination: &Kubeconfig,
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
},
&cli.StringFlag{
Name: "namespace",
Usage: "namespace to create the k3k cluster in",
Destination: &namespace,
},
}
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
func NewApp() *cli.App {
app := cli.NewApp()
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = []cli.Flag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &debug,
EnvVars: []string{"K3K_DEBUG"},
func NewRootCmd() *cobra.Command {
appCtx := &AppContext{}
rootCmd := &cobra.Command{
Use: "k3kcli",
Short: "CLI for K3K",
Version: buildinfo.Version,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
InitializeConfig(cmd)
if appCtx.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
if err != nil {
return err
}
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
if err != nil {
return err
}
appCtx.RestConfig = restConfig
appCtx.Client = ctrlClient
return nil
},
DisableAutoGenTag: true,
}
app.Before = func(clx *cli.Context) error {
if debug {
logrus.SetLevel(logrus.DebugLevel)
}
rootCmd.PersistentFlags().StringVar(&appCtx.Kubeconfig, "kubeconfig", "", "kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)")
rootCmd.PersistentFlags().BoolVar(&appCtx.Debug, "debug", false, "Turn on debug logs")
return nil
}
rootCmd.AddCommand(
NewClusterCmd(appCtx),
NewPolicyCmd(appCtx),
NewKubeconfigCmd(appCtx),
)
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
app.Commands = []*cli.Command{
NewClusterCommand(),
NewKubeconfigCommand(),
}
return app
return rootCmd
}
func Namespace() string {
if namespace == "" {
return defaultNamespace
func (ctx *AppContext) Namespace(name string) string {
if ctx.namespace != "" {
return ctx.namespace
}
return namespace
return "k3k-" + name
}
func loadRESTConfig() (*rest.Config, error) {
func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
if Kubeconfig != "" {
loadingRules.ExplicitPath = Kubeconfig
if kubeconfig != "" {
loadingRules.ExplicitPath = kubeconfig
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
return kubeConfig.ClientConfig()
}
func CobraFlagNamespace(appCtx *AppContext, flag *pflag.FlagSet) {
flag.StringVarP(&appCtx.namespace, "namespace", "n", "", "namespace of the k3k cluster")
}
func InitializeConfig(cmd *cobra.Command) {
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
// Bind the current command's flags to viper
cmd.Flags().VisitAll(func(f *pflag.Flag) {
// Apply the viper config value to the flag when the flag is not set and viper has a value
if !f.Changed && viper.IsSet(f.Name) {
val := viper.Get(f.Name)
_ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
}
})
}

cli/cmds/table_printer.go (new file, 104 lines)

@@ -0,0 +1,104 @@
package cmds
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/jsonpath"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning
func createTable[T runtime.Object](crd *apiextensionsv1.CustomResourceDefinition, objs []T) *metav1.Table {
printerColumns := getPrinterColumnsFromCRD(crd)
return &metav1.Table{
TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"},
ColumnDefinitions: convertToTableColumns(printerColumns),
Rows: createTableRows(objs, printerColumns),
}
}
func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []apiextensionsv1.CustomResourceColumnDefinition {
printerColumns := []apiextensionsv1.CustomResourceColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: "Name of the Resource", JSONPath: ".metadata.name"},
}
for _, version := range crd.Spec.Versions {
if version.Name == "v1alpha1" {
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
break
}
}
return printerColumns
}
func convertToTableColumns(printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableColumnDefinition {
var columnDefinitions []metav1.TableColumnDefinition
for _, col := range printerColumns {
columnDefinitions = append(columnDefinitions, metav1.TableColumnDefinition{
Name: col.Name,
Type: col.Type,
Format: col.Format,
Description: col.Description,
Priority: col.Priority,
})
}
return columnDefinitions
}
func createTableRows[T runtime.Object](objs []T, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableRow {
var rows []metav1.TableRow
for _, obj := range objs {
objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj)
if err != nil {
rows = append(rows, metav1.TableRow{Cells: []any{"<error: " + err.Error() + ">"}})
continue
}
rows = append(rows, metav1.TableRow{
Cells: buildRowCells(objMap, printerColumns),
Object: runtime.RawExtension{Object: obj},
})
}
return rows
}
func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []any {
var cells []any
for _, printCol := range printerColumns {
j := jsonpath.New(printCol.Name)
err := j.Parse("{" + printCol.JSONPath + "}")
if err != nil {
cells = append(cells, "<error>")
continue
}
results, err := j.FindResults(objMap)
if err != nil || len(results) == 0 || len(results[0]) == 0 {
cells = append(cells, "<none>")
continue
}
cells = append(cells, results[0][0].Interface())
}
return cells
}
func toPointerSlice[T any](v []T) []*T {
vPtr := make([]*T, len(v))
for i := range v {
vPtr[i] = &v[i]
}
return vPtr
}


@@ -1,15 +1,14 @@
package main
import (
"os"
"github.com/sirupsen/logrus"
"github.com/rancher/k3k/cli/cmds"
"github.com/sirupsen/logrus"
)
func main() {
app := cmds.NewApp()
if err := app.Run(os.Args); err != nil {
app := cmds.NewRootCmd()
if err := app.Execute(); err != nil {
logrus.Fatal(err)
}
}


@@ -122,7 +122,7 @@ You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.
* Ephemeral Storage:
```bash
k3kcli cluster create my-cluster --persistence-type ephemeral
k3kcli cluster create --persistence-type ephemeral my-cluster
```
*Important Notes:*


@@ -88,6 +88,25 @@ K3k consists of two main components:
* **CLI:** The K3k CLI provides a command-line interface for interacting with K3k. It allows users to easily create, manage, and access virtual clusters. The CLI simplifies common tasks such as creating `Cluster` CRs, retrieving kubeconfigs for accessing virtual clusters, and performing other management operations.
## VirtualClusterPolicy
K3k introduces the VirtualClusterPolicy Custom Resource, a way to set up and apply common configurations that govern how your virtual clusters operate within the K3k environment.
The primary goal of VCPs is to allow administrators to centrally manage and apply consistent policies. This reduces repetitive configuration, helps meet organizational standards, and enhances the security and operational consistency of virtual clusters managed by K3k.
A VirtualClusterPolicy is bound to one or more Kubernetes Namespaces. Once bound, the rules defined in the VCP apply to all K3k virtual clusters that are running or get created in that Namespace. This allows for flexible policy application, meaning different Namespaces can use their own unique VCPs, while others can share a single VCP for a consistent setup.
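For example, binding is done by labeling the Namespace with the name of the policy (a minimal sketch; the Namespace and policy names are illustrative):

```bash
# Bind the Namespace "my-app-namespace" to the VirtualClusterPolicy "standard-dev-policy"
kubectl label namespace my-app-namespace policy.k3k.io/policy-name=standard-dev-policy
```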
Common use cases for administrators leveraging VirtualClusterPolicy include:
- Defining the operational mode (like "shared" or "virtual") for virtual clusters.
- Setting up resource quotas and limit ranges to effectively manage how many resources virtual clusters and their workloads can use.
- Enforcing security standards, for example, by configuring Pod Security Admission (PSA) labels for Namespaces.
The K3k controller actively monitors VirtualClusterPolicy resources and the corresponding Namespace bindings. When a VCP is applied or updated, the controller ensures that the defined configurations are enforced on the relevant virtual clusters and their associated resources within the targeted Namespaces.
For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
## Comparison and Trade-offs
K3k offers two distinct modes for deploying virtual clusters: `shared` and `virtual`. Each mode has its own strengths and weaknesses, and the best choice depends on the specific needs and priorities of the user. Here's a comparison to help you make an informed decision:


@@ -1,98 +0,0 @@
# NAME
k3kcli - CLI for K3K
# SYNOPSIS
k3kcli
```
[--debug]
```
**Usage**:
```
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
```
# GLOBAL OPTIONS
**--debug**: Turn on debug logs
# COMMANDS
## cluster
cluster command
### create
Create new cluster
>k3kcli cluster create [command options] NAME
**--agent-args**="": agents extra arguments
**--agents**="": number of agents (default: 0)
**--cluster-cidr**="": cluster CIDR
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--mode**="": k3k mode type (shared, virtual) (default: "shared")
**--namespace**="": namespace to create the k3k cluster in
**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")
**--server-args**="": servers extra arguments
**--servers**="": number of servers (default: 1)
**--service-cidr**="": service CIDR
**--storage-class-name**="": storage class name for dynamic persistence type
**--token**="": token of the cluster
**--version**="": k3s version
### delete
Delete an existing cluster
>k3kcli cluster delete [command options] NAME
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace**="": namespace to create the k3k cluster in
## kubeconfig
Manage kubeconfig for clusters
### generate
Generate kubeconfig for clusters
**--altNames**="": altNames of the generated certificates for the kubeconfig
**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")
**--config-name**="": the name of the generated kubeconfig file
**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--name**="": cluster name
**--namespace**="": namespace to create the k3k cluster in
**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig


@@ -5,19 +5,14 @@ import (
"os"
"path"
"github.com/spf13/cobra/doc"
"github.com/rancher/k3k/cli/cmds"
)
func main() {
// Instantiate the CLI application
app := cmds.NewApp()
// Generate the Markdown documentation
md, err := app.ToMarkdown()
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
k3kcli := cmds.NewRootCmd()
wd, err := os.Getwd()
if err != nil {
@@ -25,13 +20,12 @@ func main() {
os.Exit(1)
}
outputFile := path.Join(wd, "docs/cli/cli-docs.md")
outputDir := path.Join(wd, "docs/cli")
err = os.WriteFile(outputFile, []byte(md), 0644)
if err != nil {
if err := doc.GenMarkdownTree(k3kcli, outputDir); err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
fmt.Println("Documentation generated at " + outputFile)
fmt.Println("Documentation generated at " + outputDir)
}

docs/cli/k3kcli.md (new file, 18 lines)

@@ -0,0 +1,18 @@
## k3kcli
CLI for K3K
### Options
```
--debug Turn on debug logs
-h, --help help for k3kcli
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,24 @@
## k3kcli cluster
cluster command
### Options
```
-h, --help help for cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing cluster


@@ -0,0 +1,50 @@
## k3kcli cluster create
Create new cluster
```
k3kcli cluster create [flags]
```
### Examples
```
k3kcli cluster create [command options] NAME
```
### Options
```
--agent-args strings agents extra arguments
--agent-envs strings agents extra Envs
--agents int number of agents
--cluster-cidr string cluster CIDR
--custom-certs string The path for custom certificate directory
-h, --help help for create
--kubeconfig-server string override the kubeconfig server host
--mirror-host-nodes Mirror Host Cluster Nodes
--mode string k3k mode type (shared, virtual) (default "shared")
-n, --namespace string namespace of the k3k cluster
--persistence-type string persistence mode for the nodes (dynamic, ephemeral, static) (default "dynamic")
--policy string The policy to create the cluster in
--server-args strings servers extra arguments
--server-envs strings servers extra Envs
--servers int number of servers (default 1)
--service-cidr string service CIDR
--storage-class-name string storage class name for dynamic persistence type
--storage-request-size string storage size for dynamic persistence type
--token string token of the cluster
--version string k3s version
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,33 @@
## k3kcli cluster delete
Delete an existing cluster
```
k3kcli cluster delete [flags]
```
### Examples
```
k3kcli cluster delete [command options] NAME
```
### Options
```
-h, --help help for delete
--keep-data keeps persistence volumes created for the cluster after deletion
-n, --namespace string namespace of the k3k cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,32 @@
## k3kcli cluster list
List all the existing cluster
```
k3kcli cluster list [flags]
```
### Examples
```
k3kcli cluster list [command options]
```
### Options
```
-h, --help help for list
-n, --namespace string namespace of the k3k cluster
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command


@@ -0,0 +1,22 @@
## k3kcli kubeconfig
Manage kubeconfig for clusters
### Options
```
-h, --help help for kubeconfig
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters


@@ -0,0 +1,33 @@
## k3kcli kubeconfig generate
Generate kubeconfig for clusters
```
k3kcli kubeconfig generate [flags]
```
### Options
```
--altNames strings altNames of the generated certificates for the kubeconfig
--cn string Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
--config-name string the name of the generated kubeconfig file
--expiration-days int Expiration date of the certificates used for the kubeconfig (default 365)
-h, --help help for generate
--kubeconfig-server string override the kubeconfig server host
--name string cluster name
-n, --namespace string namespace of the k3k cluster
--org strings Organization name (ORG) of the generated certificates for the kubeconfig
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters

docs/cli/k3kcli_policy.md (new file, 24 lines)

@@ -0,0 +1,24 @@
## k3kcli policy
policy command
### Options
```
-h, --help help for policy
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies


@@ -0,0 +1,32 @@
## k3kcli policy create
Create new policy
```
k3kcli policy create [flags]
```
### Examples
```
k3kcli policy create [command options] NAME
```
### Options
```
-h, --help help for create
--mode string The allowed mode type of the policy (default "shared")
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,31 @@
## k3kcli policy delete
Delete an existing policy
```
k3kcli policy delete [flags]
```
### Examples
```
k3kcli policy delete [command options] NAME
```
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -0,0 +1,31 @@
## k3kcli policy list
List all the existing policies
```
k3kcli policy list [flags]
```
### Examples
```
k3kcli policy list [command options]
```
### Options
```
-h, --help help for list
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command


@@ -1,9 +1,4 @@
processor:
# RE2 regular expressions describing types that should be excluded from the generated documentation.
ignoreTypes:
- ClusterSet
- ClusterSetList
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
ignoreFields:
- "status$"


@@ -10,6 +10,8 @@
### Resource Types
- [Cluster](#cluster)
- [ClusterList](#clusterlist)
- [VirtualClusterPolicy](#virtualclusterpolicy)
- [VirtualClusterPolicyList](#virtualclusterpolicylist)
@@ -51,23 +53,6 @@ _Appears in:_
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
#### ClusterLimit
ClusterLimit defines resource limits for server and agent nodes.
_Appears in:_
- [ClusterSpec](#clusterspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
#### ClusterList
@@ -97,6 +82,20 @@ _Validation:_
_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
#### ClusterPhase
_Underlying type:_ _string_
ClusterPhase is a high-level summary of the cluster's current lifecycle state.
_Appears in:_
- [ClusterStatus](#clusterstatus)
@@ -120,20 +119,81 @@ _Appears in:_
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit defines resource limits for server/agent nodes. | | |
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
| `serverEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | ServerEnvs specifies list of environment variables to set in the server pod. | | |
| `agentEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | AgentEnvs specifies list of environment variables to set in the agent pod. | | |
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
#### CredentialSource
CredentialSource defines where to get a credential from.
It can represent either a TLS key pair or a single private key.
_Appears in:_
- [CredentialSources](#credentialsources)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |
#### CredentialSources
CredentialSources lists all the required credentials, including both
TLS key pairs and single signing keys.
_Appears in:_
- [CustomCAs](#customcas)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverCA` _[CredentialSource](#credentialsource)_ | ServerCA specifies the server-ca cert/key pair. | | |
| `clientCA` _[CredentialSource](#credentialsource)_ | ClientCA specifies the client-ca cert/key pair. | | |
| `requestHeaderCA` _[CredentialSource](#credentialsource)_ | RequestHeaderCA specifies the request-header-ca cert/key pair. | | |
| `etcdServerCA` _[CredentialSource](#credentialsource)_ | ETCDServerCA specifies the etcd-server-ca cert/key pair. | | |
| `etcdPeerCA` _[CredentialSource](#credentialsource)_ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. | | |
| `serviceAccountToken` _[CredentialSource](#credentialsource)_ | ServiceAccountToken specifies the service-account-token key. | | |
#### CustomCAs
CustomCAs specifies the cert/key pairs for custom CA certificates.
_Appears in:_
- [ClusterSpec](#clusterspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
#### ExposeConfig
@@ -180,6 +240,10 @@ LoadBalancerConfig specifies options for exposing the API server through a LoadB
_Appears in:_
- [ExposeConfig](#exposeconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverPort` _integer_ | ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.<br />If not specified, the default https 443 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |
| `etcdPort` _integer_ | ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.<br />If not specified, the default etcd 2379 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |
#### NodePortConfig
@@ -195,9 +259,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |
#### PersistenceConfig
@@ -210,13 +273,12 @@ PersistenceConfig specifies options for persisting etcd data.
_Appears in:_
- [ClusterSpec](#clusterspec)
- [ClusterStatus](#clusterstatus)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
#### PersistenceMode
@@ -232,5 +294,79 @@ _Appears in:_
#### PodSecurityAdmissionLevel
_Underlying type:_ _string_
PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
_Validation:_
- Enum: [privileged baseline restricted]
_Appears in:_
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
#### VirtualClusterPolicy
VirtualClusterPolicy allows defining common configurations and constraints
for clusters within a clusterpolicy.
_Appears in:_
- [VirtualClusterPolicyList](#virtualclusterpolicylist)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
#### VirtualClusterPolicyList
VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |
#### VirtualClusterPolicySpec
VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
_Appears in:_
- [VirtualClusterPolicy](#virtualclusterpolicy)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `quota` _[ResourceQuotaSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcequotaspec-v1-core)_ | Quota specifies the resource limits for clusters within a clusterpolicy. | | |
| `limit` _[LimitRangeSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core)_ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy<br />to set defaults and constraints (min/max) | | |
| `defaultNodeSelector` _object (keys:string, values:string)_ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. | | |
| `defaultPriorityClass` _string_ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. | | |
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |


@@ -33,7 +33,7 @@ To see all the available Make commands you can run `make help`, i.e:
```
-> % make help
all Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
all Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'
version Print the current version
build Build the the K3k binaries (k3k, k3k-kubelet and k3kcli)
package Package the k3k and k3k-kubelet Docker images
@@ -41,9 +41,10 @@ To see all the available Make commands you can run `make help`, i.e:
test Run all the tests
test-unit Run the unit tests (skips the e2e)
test-controller Run the controller tests (pkg/controller)
test-kubelet-controller Run the controller tests (pkg/controller)
test-e2e Run the e2e tests
build-crds Build the CRDs specs
docs Build the CRDs docs
generate Generate the CRDs specs
docs Build the CRDs and CLI docs
lint Find any linting issues in the project
validate Validate the project checking for any dependency or doc mismatch
install Install K3k with Helm on the targeted Kubernetes cluster
@@ -88,7 +89,7 @@ The required binaries for `envtest` are installed with [`setup-envtest`](https:/
## CRDs and Docs
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make generate`.
Remember also to update the CRDs documentation by running the `make docs` command.
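For example, a typical workflow after changing the API types could look like this (a sketch based on the Make targets listed above):

```bash
make generate   # regenerate the CRD specs
make docs       # rebuild the CRDs and CLI docs
make validate   # check for any dependency or doc mismatch
```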
@@ -114,7 +115,7 @@ Install now k3k as usual:
```bash
helm repo update
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
```
### Create a virtual cluster

docs/howtos/airgap.md (new file, 83 lines)

@@ -0,0 +1,83 @@
# K3k Air Gap Installation Guide
Applicable K3k modes: `virtual`, `shared`
This guide describes how to deploy **K3k** in an **air-gapped environment**, including the packaging of required images, Helm chart configurations, and cluster creation using a private container registry.
---
## 1. Package Required Container Images
### 1.1: Follow K3s Air Gap Preparation
Begin with the official K3s air gap packaging instructions:
[K3s Air Gap Installation Docs](https://docs.k3s.io/installation/airgap)
### 1.2: Include K3k-Specific Images
In addition to the K3s images, make sure to include the following in your image bundle:
| Image Names | Descriptions |
| --------------------------- | --------------------------------------------------------------- |
| `rancher/k3k:<tag>` | K3k controller image (replace `<tag>` with the desired version) |
| `rancher/k3k-kubelet:<tag>` | K3k agent image for shared mode |
| `rancher/k3s:<tag>` | K3s server/agent image for virtual clusters |
Load these images into your internal (air-gapped) registry.
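A minimal sketch of mirroring the K3k images into a private registry (the tags and registry host below are placeholders, adjust them to your environment):

```bash
K3K_TAG="v0.3.4"                      # placeholder: K3k controller/kubelet tag
K3S_TAG="v1.33.1-k3s1"                # placeholder: K3s version used by virtual clusters
REGISTRY="registry.internal.domain"   # placeholder: internal registry host

for img in "rancher/k3k:${K3K_TAG}" "rancher/k3k-kubelet:${K3K_TAG}" "rancher/k3s:${K3S_TAG}"; do
  docker pull "${img}"
  docker tag "${img}" "${REGISTRY}/${img}"
  docker push "${REGISTRY}/${img}"
done
```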
---
## 2. Configure Helm Chart for Air Gap installation
Update the `values.yaml` file in the K3k Helm chart with air gap settings:
```yaml
image:
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
sharedAgent:
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
k3sServer:
image:
repository: rancher/k3s
pullPolicy: "" # Optional
```
These values enforce the use of internal image repositories for the K3k controller, the agent and the server.
**Note**: All virtual clusters will automatically use these settings.
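With those values in place, the chart can be installed against the customized file (a sketch, assuming the `k3k/k3k` Helm repository is already configured; in a fully air-gapped environment you would point at a locally downloaded chart archive instead):

```bash
helm install --namespace k3k-system --create-namespace k3k k3k/k3k -f values.yaml
```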
---
## 3. Enforce Registry in Virtual Clusters
When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry:
```bash
k3kcli cluster create \
--server-args "--system-default-registry=registry.internal.domain" \
my-cluster
```
This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls.
[K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help)
---
## 4. Specify K3s Version for Virtual Clusters
K3k allows specifying the K3s version used in each virtual cluster:
```bash
k3kcli cluster create \
--version v1.29.4+k3s1 \
my-cluster
```
- If omitted, the **host cluster's K3s version** will be used by default, which might not exist if it's not part of the air gap package.


@@ -0,0 +1,79 @@
# How to Choose Between Shared and Virtual Mode
This guide helps you choose the right mode for your virtual cluster: **Shared** or **Virtual**.
If you're unsure, start with **Shared mode** — it's the default and fits most common scenarios.
---
## Shared Mode (default)
**Best for:**
- Developers who want to run workloads quickly without managing Kubernetes internals
- Platform teams that require visibility and control over all workloads
- Users who need access to host-level resources (e.g., GPUs)
In **Shared mode**, the virtual cluster runs its own K3s server but relies on the host to execute workloads. The virtual kubelet syncs resources, enabling lightweight, fast provisioning with support for cluster resource isolation. More details on the [architecture](./../architecture.md#shared-mode).
---
### Use Cases by Persona
#### 👩‍💻 Developer
*"I'm building a web app that should be exposed outside the virtual cluster."*
→ Use **Shared mode**. It allows you to [expose](./expose-workloads.md) your application.
#### 👩‍🔬 Data Scientist
*“I need to run Jupyter notebooks that leverage the cluster's GPU.”*
→ Use **Shared mode**. It gives access to physical devices while keeping overhead low.
#### 🧑‍💼 Platform Admin
*"I want to monitor and secure all tenant workloads from a central location."*
→ Use **Shared mode**. Host-level agents (e.g., observability, policy enforcement) work across all virtual clusters.
#### 🔒 Security Engineer
*"I need to enforce security policies like network policies or runtime scanning across all workloads."*
→ Use **Shared mode**. The platform can enforce policies globally without tenant bypass.
*"I need to test a new admission controller or policy engine."*
→ Use **Shared mode**, if it's scoped to your virtual cluster. You can run tools like Kubewarden without affecting the host.
#### 🔁 CI/CD Engineer
*"I want to spin up disposable virtual clusters per pipeline run, fast and with low resource cost."*
→ Use **Shared mode**. It's quick to provision and ideal for short-lived, namespace-scoped environments.
---
## Virtual Mode
**Best for:**
- Advanced users who need full Kubernetes isolation
- Developers testing experimental or cluster-wide features
- Use cases requiring control over the entire Kubernetes control plane
In **Virtual mode**, the virtual cluster runs its own isolated Kubernetes control plane. It supports different CNIs, and API configurations — ideal for deep experimentation or advanced workloads. More details on the [architecture](./../architecture.md#virtual-mode).
---
### Use Cases by Persona
#### 👩‍💻 Developer
*"I need to test a new Kubernetes feature gate that's disabled in the host cluster."*
→ Use **Virtual mode**. You can configure your own control plane flags and API features.
#### 🧑‍💼 Platform Admin
*"We're testing upgrades across Kubernetes versions, including new API behaviors."*
→ Use **Virtual mode**. You can run different Kubernetes versions and safely validate upgrade paths.
#### 🌐 Network Engineer
*"I'm evaluating a new CNI that needs full control of the cluster's networking."*
→ Use **Virtual mode**. You can run a separate CNI stack without affecting the host or other tenants.
#### 🔒 Security Engineer
*"I'm testing a new admission controller and policy engine before rolling it out cluster-wide."*
→ Use **Virtual mode**, if you need to test cluster-wide policies, custom admission flow, or advanced extensions with full control.
---
## Still Not Sure?
If you're evaluating more advanced use cases or want a deeper comparison, see the full trade-off breakdown in the [Architecture documentation](../architecture.md).


@@ -0,0 +1,302 @@
# How to: Create a Virtual Cluster
This guide walks through the various ways to create and manage virtual clusters in K3K. We'll cover common use cases using both the **Custom Resource Definitions (CRDs)** and the **K3K CLI**, so you can choose the method that fits your workflow.
> 📘 For full reference:
> - [CRD Reference Documentation](../crds/crd-docs.md)
> - [CLI Reference Documentation](../cli/cli-docs.md)
> - [Full example](../advanced-usage.md)
> [!NOTE]
> 🚧 Some features are currently only available via the CRD interface. CLI support may be added in the future.
---
## Use Case: Create and Expose a Basic Virtual Cluster
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ingress
spec:
tlsSANs:
- my-cluster.example.com
expose:
ingress:
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/ssl-redirect: "HTTPS"
```
This will create a virtual cluster in `shared` mode and expose it via an ingress with the specified hostname.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster with Persistent Storage (**Default**)
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-persistent
spec:
persistence:
type: dynamic
storageClassName: local-path
storageRequestSize: 30Gi
```
This ensures that the virtual cluster stores its state persistently with a 30Gi volume.
If `storageClassName` is not set, the default StorageClass will be used.
If `storageRequestSize` is not set, a 1Gi volume will be requested by default.
### CLI Method
```sh
k3kcli cluster create \
--persistence-type dynamic \
--storage-class-name local-path \
k3kcluster-persistent
```
> [!NOTE]
> The requested size can also be set via the `--storage-request-size` flag of `k3kcli cluster create`.
---
## Use Case: Create a Highly Available Virtual Cluster in `shared` mode
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ha
spec:
servers: 3
```
This will create a virtual cluster with 3 servers and a default 1Gi volume for persistence.
### CLI Method
```sh
k3kcli cluster create \
--servers 3 \
k3kcluster-ha
```
---
## Use Case: Create a Highly Available Virtual Cluster in `virtual` mode
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-virtual
spec:
mode: virtual
servers: 3
agents: 3
```
This will create a virtual cluster with 3 servers and 3 agents and a default 1Gi volume for persistence.
> [!NOTE]
> Agents only exist for `virtual` mode.
### CLI Method
```sh
k3kcli cluster create \
--agents 3 \
--servers 3 \
--mode virtual \
k3kcluster-virtual
```
---
## Use Case: Create an Ephemeral Virtual Cluster
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
spec:
persistence:
type: ephemeral
```
This will create an ephemeral virtual cluster with no persistence and a single server.
### CLI Method
```sh
k3kcli cluster create \
--persistence-type ephemeral \
k3kcluster-ephemeral
```
---
## Use Case: Create a Virtual Cluster with a Custom Kubernetes Version
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
spec:
version: "v1.33.1-k3s1"
```
This sets the virtual cluster's Kubernetes version explicitly.
> [!NOTE]
> Only [K3s](https://k3s.io) distributions are supported. You can find compatible versions on the K3s GitHub [release page](https://github.com/k3s-io/k3s/releases).
### CLI Method
```sh
k3kcli cluster create \
--version v1.33.1-k3s1 \
k3kcluster-custom-k8s
```
---
## Use Case: Create a Virtual Cluster with Custom Resource Limits
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-resourced
spec:
mode: virtual
serverLimit:
cpu: "1"
memory: "2Gi"
workerLimit:
cpu: "1"
memory: "2Gi"
```
This configures the CPU and memory limit for the virtual cluster.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster on specific host nodes
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-node-placed
spec:
nodeSelector:
disktype: ssd
```
This places the virtual cluster on nodes with the label `disktype: ssd`.
> [!NOTE]
> In `shared` mode, workloads are also scheduled on the selected nodes.
### CLI Method
*No CLI method available yet*
---
## Use Case: Create a Virtual Cluster with a Rancher Host Cluster Kubeconfig
When using a `kubeconfig` generated with Rancher, you need to specify the desired host for the virtual cluster `kubeconfig` via the CLI.
By default, `k3kcli` uses the current host `kubeconfig` to determine the target cluster.
### CRD Method
*Not applicable*
### CLI Method
```sh
k3kcli cluster create \
--kubeconfig-server https://abc.xyz \
k3kcluster-host-rancher
```
---
## Use Case: Create a Virtual Cluster Behind an HTTP Proxy
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: k3kcluster-http-proxy
spec:
serverEnvs:
- name: HTTP_PROXY
value: "http://abc.xyz"
agentEnvs:
- name: HTTP_PROXY
value: "http://abc.xyz"
```
This configures an HTTP proxy for both servers and agents in the virtual cluster.
> [!NOTE]
> This can be leveraged to pass **any custom environment variables** to the servers and agents — not just proxy settings.
### CLI Method
```sh
k3kcli cluster create \
--server-envs HTTP_PROXY=http://abc.xyz \
--agent-envs HTTP_PROXY=http://abc.xyz \
k3kcluster-http-proxy
```
---
## How to: Connect to a Virtual Cluster
Once the virtual cluster is running, you can connect to it using the CLI:
### CLI Method
```sh
k3kcli kubeconfig generate --namespace k3k-mycluster --name mycluster
export KUBECONFIG=$PWD/mycluster-kubeconfig.yaml
kubectl get nodes
```
This command generates a `kubeconfig` file, which you can use to access your virtual cluster via `kubectl`.


@@ -0,0 +1,52 @@
# How-to: Expose Workloads Outside the Virtual Cluster
This guide explains how to expose workloads running in k3k-managed virtual clusters to external networks. Behavior varies depending on the operating mode of the virtual cluster.
## Virtual Mode
> [!CAUTION]
> **Not Supported**
> In *virtual mode*, direct external exposure of workloads is **not available**.
> This mode is designed for strong isolation and does not expose the virtual cluster's network directly.
## Shared Mode
In *shared mode*, workloads can be exposed to the external network using standard Kubernetes service types or an ingress controller, depending on your requirements.
> [!NOTE]
> *`Services`* are always synced from the virtual cluster to the host cluster following the same principle described [here](../architecture.md#shared-mode) for pods.
### Option 1: Use `NodePort` or `LoadBalancer`
To expose a service such as a web application outside the host cluster:
- **`NodePort`**:
Exposes the service on a static port on each node's IP.
Access the service at `http://<NodeIP>:<NodePort>`.
- **`LoadBalancer`**:
Provisions an external load balancer (if supported by the environment) and exposes the service via the load balancer's IP.
> **Note**
> The `LoadBalancer` IP is currently not reflected back to the virtual cluster service.
> [k3k issue #365](https://github.com/rancher/k3k/issues/365)
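As a minimal sketch (assuming a workload in the virtual cluster labeled `app: web` and listening on port 8080; names and ports are illustrative), a `NodePort` Service could look like this:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  type: NodePort
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 8080
      nodePort: 30080   # optional; omit to let Kubernetes pick a port in the 30000-32767 range
EOF
```

Because Services are synced to the host cluster, the application then becomes reachable at `http://<HostNodeIP>:30080`.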
### Option 2: Use `ClusterIP` for Internal Communication
If the workload should only be accessible to other services or pods *within* the host cluster:
- Use the `ClusterIP` service type.
This exposes the service on an internal IP, only reachable inside the host cluster.
### Option 3: Use Ingress for HTTP/HTTPS Routing
For more advanced routing (e.g., hostname- or path-based routing), deploy an **Ingress controller** in the virtual cluster, and expose it via `NodePort` or `LoadBalancer`.
This allows you to:
- Define Ingress resources in the virtual cluster.
- Route external traffic to services within the virtual cluster.
>**Note**
> Support for using the host cluster's Ingress controller from a virtual cluster is being tracked in
> [k3k issue #356](https://github.com/rancher/k3k/issues/356)
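For example, a minimal sketch of an Ingress defined inside the virtual cluster (assuming an NGINX ingress controller is deployed there and a Service named `web` exists; the hostname is illustrative):

```sh
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web
spec:
  ingressClassName: nginx
  rules:
    - host: web.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web
                port:
                  number: 80
EOF
```

External traffic then reaches the workload through whichever `NodePort` or `LoadBalancer` Service exposes the ingress controller itself.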


@@ -0,0 +1,147 @@
# Troubleshooting
This guide walks through common troubleshooting steps for working with K3K virtual clusters.
---
## `too many open files` error
The `k3k-kubelet` or `k3kcluster-server-` pods may run into the following issue:
```sh
E0604 13:14:53.369369 1 leaderelection.go:336] error initially creating leader election record: Post "https://k3k-http-proxy-k3kcluster-service/apis/coordination.k8s.io/v1/namespaces/kube-system/leases": context canceled
{"level":"fatal","timestamp":"2025-06-04T13:14:53.369Z","logger":"k3k-kubelet","msg":"virtual manager stopped","error":"too many open files"}
```
This typically indicates a low limit on inotify watchers or file descriptors on the host system.
To increase the inotify limits connect to the host nodes and run:
```sh
sudo sysctl -w fs.inotify.max_user_watches=2099999999
sudo sysctl -w fs.inotify.max_user_instances=2099999999
sudo sysctl -w fs.inotify.max_queued_events=2099999999
```
You can persist these settings by adding them to `/etc/sysctl.conf`:
```sh
fs.inotify.max_user_watches=2099999999
fs.inotify.max_user_instances=2099999999
fs.inotify.max_queued_events=2099999999
```
Apply the changes:
```sh
sudo sysctl -p
```
You can find more details in this [KB document](https://www.suse.com/support/kb/doc/?id=000020048).
---
## Inspect Controller Logs for Failure Diagnosis
To view logs for a failed virtual cluster:
```sh
kubectl logs -n k3k-system -l app.kubernetes.io/name=k3k
```
This retrieves logs from K3k controller components.
---
## Inspect Cluster Logs for Failure Diagnosis
To view logs for a failed virtual cluster:
```sh
kubectl logs -n <cluster_namespace> -l cluster=<cluster_name>
```
This retrieves logs from the K3k cluster components (agents, server, and virtual-kubelet).
> 💡 You can also use `kubectl describe cluster <cluster_name>` to check for recent events and status conditions.
---
## Virtual Cluster Not Starting or Stuck in Pending
Some of the most common causes are related to missing prerequisites or wrong configuration.
### Storage class not available
When creating a Virtual Cluster with `dynamic` persistence, a PVC is needed. You can check whether the PVC was claimed but not bound with `kubectl get pvc -n <cluster_namespace>`. If you see a pending PVC, you probably don't have a default StorageClass defined, or you have specified a wrong one.
#### Example with wrong storage class
The `pvc` is pending:
```bash
kubectl get pvc -n k3k-test-storage
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
varlibrancherk3s-k3k-test-storage-server-0 Pending not-available <unset> 4s
```
The `server` is pending:
```bash
kubectl get po -n k3k-test-storage
NAME READY STATUS RESTARTS AGE
k3k-test-storage-kubelet-j4zn5 1/1 Running 0 54s
k3k-test-storage-server-0 0/1 Pending 0 54s
```
To fix this, use a valid storage class. You can list the existing storage classes with:
```bash
kubectl get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 3d6h
```
### Wrong node selector
When creating a Virtual Cluster with `defaultNodeSelector`, if the selector cannot be satisfied, all pods will remain pending.
#### Example
The `server` is pending:
```bash
kubectl get po
NAME READY STATUS RESTARTS AGE
k3k-k3kcluster-node-placed-server-0 0/1 Pending 0 58s
```
The description of the pod provides the reason:
```bash
kubectl describe po k3k-k3kcluster-node-placed-server-0
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 84s default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
```
To fix this, use a valid node affinity/selector.
### Image pull issues (airgapped setup)
When creating a Virtual Cluster in an air-gapped environment, images need to be available in the configured registry. You can check for an `ImagePullBackOff` status when getting the pods in the virtual cluster namespace.
#### Example
The `server` is failing:
```bash
kubectl get po -n k3k-test-registry
NAME READY STATUS RESTARTS AGE
k3k-test-registry-kubelet-r4zh5 1/1 Running 0 54s
k3k-test-registry-server-0 0/1 ImagePullBackOff 0 54s
```
To fix this, make sure the failing image is available. You can describe the failing pod to get more details.


@@ -0,0 +1,147 @@
# VirtualClusterPolicy
The VirtualClusterPolicy Custom Resource in K3k provides a way to define and enforce consistent configurations, security settings, and resource management rules for your virtual clusters and the Namespaces they operate within.
By using VCPs, administrators can centrally manage these aspects, reducing manual configuration, ensuring alignment with organizational standards, and enhancing the overall security and operational consistency of the K3k environment.
## Core Concepts
### What is a VirtualClusterPolicy?
A `VirtualClusterPolicy` is a cluster-scoped Kubernetes Custom Resource that specifies a set of rules and configurations. These policies are then applied to K3k virtual clusters (`Cluster` resources) operating within Kubernetes Namespaces that are explicitly bound to a VCP.
### Binding a Policy to a Namespace
To apply a `VirtualClusterPolicy` to one or more Namespaces (and thus to all K3k `Cluster` resources within those Namespaces), you need to label the desired Namespace(s). Add the following label to your Namespace metadata:
`policy.k3k.io/policy-name: <YOUR_POLICY_NAME>`
**Example: Labeling a Namespace**
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: my-app-namespace
labels:
policy.k3k.io/policy-name: "standard-dev-policy"
```
In this example, `my-app-namespace` will adhere to the rules defined in the `VirtualClusterPolicy` named `standard-dev-policy`. Multiple Namespaces can be bound to the same policy for uniform configuration, or different Namespaces can be bound to distinct policies.
It's also important to note what happens when a Namespace's policy binding changes. If a Namespace is unbound from a VirtualClusterPolicy (by removing the policy.k3k.io/policy-name label), K3k will clean up and remove the resources (such as ResourceQuotas, LimitRanges, and managed Namespace labels) that were originally applied by that policy. Similarly, if the label is changed to bind the Namespace to a new VirtualClusterPolicy, K3k will first remove the resources associated with the old policy before applying the configurations from the new one, ensuring a clean transition.
### Default Policy Values
If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g., using `k3kcli policy create my-default-policy`), it will be created with default settings. Currently, this includes `spec.allowedMode` being set to `"shared"`.
```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: my-default-policy
spec:
allowedMode: shared
```
## Key Capabilities & Examples
A `VirtualClusterPolicy` can configure several aspects of the Namespaces it's bound to and the virtual clusters operating within them.
### 1. Restricting Allowed Virtual Cluster Modes (`AllowedMode`)
You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster` resources can be provisioned within bound Namespaces. If a `Cluster` is created in a bound Namespace with a mode not allowed in `allowedMode`, its creation might proceed but an error should be reported in the `Cluster` resource's status.
**Example:** Allow only "shared" mode clusters.
```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: shared-only-policy
spec:
allowedMode: shared
```
You can also specify this using the CLI: `k3kcli policy create --mode shared shared-only-policy` (or `--mode virtual`).
### 2. Defining Resource Quotas (`quota`)
You can define resource consumption limits for bound Namespaces by specifying a `ResourceQuota`. K3k will create a `ResourceQuota` object in each bound Namespace with the provided specifications.
**Example:** Set CPU, memory, and pod limits.
```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: quota-policy
spec:
quota:
hard:
cpu: "10"
memory: "20Gi"
pods: "10"
```
### 3. Setting Limit Ranges (`limit`)
You can define default resource requests/limits and min/max constraints for containers running in bound Namespaces by specifying a `LimitRange`. K3k will create a `LimitRange` object in each bound Namespace.
**Example:** Define default CPU requests/limits and min/max CPU.
```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: limit-policy
spec:
limit:
limits:
- default:
cpu: "500m"
defaultRequest:
cpu: "500m"
max:
cpu: "1"
min:
cpu: "100m"
type: Container
```
### 4. Managing Network Isolation (`disableNetworkPolicy`)
By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network isolation for virtual clusters (especially in shared mode). You can disable the creation of this default policy.
**Example:** Disable the default NetworkPolicy.
```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: no-default-netpol-policy
spec:
disableNetworkPolicy: true
```
### 5. Enforcing Pod Security Admission (`podSecurityAdmissionLevel`)
You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admission (PSA) level. K3k will apply the corresponding PSA labels to each bound Namespace. The allowed values are `privileged`, `baseline`, and `restricted`; this will add labels like `pod-security.kubernetes.io/enforce: <level>` to the bound Namespace.
**Example:** Enforce the "baseline" PSS level.
```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
name: baseline-psa-policy
spec:
podSecurityAdmissionLevel: baseline
```
## Further Reading
* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
* To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document.


@@ -1,11 +1,9 @@
apiVersion: k3k.io/v1alpha1
kind: ClusterSet
kind: VirtualClusterPolicy
metadata:
name: clusterset-example
name: policy-example
# spec:
# disableNetworkPolicy: false
# allowedNodeTypes:
# - "shared"
# - "virtual"
# allowedMode: "shared"
# podSecurityAdmissionLevel: "baseline"
# defaultPriorityClass: "lowpriority"

go.mod

@@ -1,40 +1,55 @@
module github.com/rancher/k3k
go 1.23.4
go 1.24.2
replace (
github.com/google/cel-go => github.com/google/cel-go v0.17.7
github.com/google/cel-go => github.com/google/cel-go v0.20.1
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
github.com/prometheus/common => github.com/prometheus/common v0.47.0
github.com/prometheus/common => github.com/prometheus/common v0.64.0
golang.org/x/term => golang.org/x/term v0.15.0
)
require (
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/prometheus/client_model v0.6.1
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.0
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
go.etcd.io/etcd/api/v3 v3.5.16
go.etcd.io/etcd/client/v3 v3.5.16
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.29.11
k8s.io/apimachinery v0.29.11
k8s.io/apiserver v0.29.11
k8s.io/client-go v0.29.11
k8s.io/component-base v0.29.11
k8s.io/component-helpers v0.29.11
k8s.io/api v0.31.4
k8s.io/apiextensions-apiserver v0.31.4
k8s.io/apimachinery v0.31.4
k8s.io/apiserver v0.31.4
k8s.io/cli-runtime v0.31.4
k8s.io/client-go v0.31.4
k8s.io/component-base v0.31.4
k8s.io/component-helpers v0.31.4
k8s.io/kubectl v0.31.4
k8s.io/kubelet v0.31.4
k8s.io/kubernetes v1.31.4
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.17.5
sigs.k8s.io/controller-runtime v0.19.4
)
require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
)
require (
@@ -49,7 +64,6 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -63,7 +77,7 @@ require (
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -80,7 +94,8 @@ require (
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
@@ -97,12 +112,11 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
@@ -116,7 +130,7 @@ require (
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
@@ -151,63 +165,62 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
google.golang.org/grpc v1.67.3 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.11 // indirect
k8s.io/cli-runtime v0.29.11 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.29.11 // indirect
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/kubectl v0.29.11 // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect

go.sum (1609 lines changed) — diff suppressed because it is too large

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
set -x
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
K8S_VERSION=$(cat go.mod | grep -m1 "k8s.io/apiserver" | cut -d " " -f 2)
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=./code-generator
# cd into the git dir to checkout the code gen version compatible with the k8s version that this is using
cd $CODEGEN_PKG
git fetch origin tag ${K8S_VERSION}
git checkout ${K8S_VERSION}
cd -
source ${CODEGEN_PKG}/kube_codegen.sh
kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
--input-pkg-root "${SCRIPT_ROOT}/pkg/apis" \
--output-base "${SCRIPT_ROOT}/pkg/apis"
rm -rf code-generator


@@ -2,73 +2,22 @@ package main
import (
"errors"
"os"
"gopkg.in/yaml.v2"
)
// config has all virtual-kubelet startup options
type config struct {
ClusterName string `yaml:"clusterName,omitempty"`
ClusterNamespace string `yaml:"clusterNamespace,omitempty"`
ServiceName string `yaml:"serviceName,omitempty"`
Token string `yaml:"token,omitempty"`
AgentHostname string `yaml:"agentHostname,omitempty"`
HostConfigPath string `yaml:"hostConfigPath,omitempty"`
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
KubeletPort string `yaml:"kubeletPort,omitempty"`
ServerIP string `yaml:"serverIP,omitempty"`
Version string `yaml:"version,omitempty"`
}
func (c *config) unmarshalYAML(data []byte) error {
var conf config
if err := yaml.Unmarshal(data, &conf); err != nil {
return err
}
if c.ClusterName == "" {
c.ClusterName = conf.ClusterName
}
if c.ClusterNamespace == "" {
c.ClusterNamespace = conf.ClusterNamespace
}
if c.HostConfigPath == "" {
c.HostConfigPath = conf.HostConfigPath
}
if c.VirtualConfigPath == "" {
c.VirtualConfigPath = conf.VirtualConfigPath
}
if c.KubeletPort == "" {
c.KubeletPort = conf.KubeletPort
}
if c.AgentHostname == "" {
c.AgentHostname = conf.AgentHostname
}
if c.ServiceName == "" {
c.ServiceName = conf.ServiceName
}
if c.Token == "" {
c.Token = conf.Token
}
if c.ServerIP == "" {
c.ServerIP = conf.ServerIP
}
if c.Version == "" {
c.Version = conf.Version
}
return nil
ClusterName string `mapstructure:"clusterName"`
ClusterNamespace string `mapstructure:"clusterNamespace"`
ServiceName string `mapstructure:"serviceName"`
Token string `mapstructure:"token"`
AgentHostname string `mapstructure:"agentHostname"`
HostKubeconfig string `mapstructure:"hostKubeconfig"`
VirtKubeconfig string `mapstructure:"virtKubeconfig"`
KubeletPort int `mapstructure:"kubeletPort"`
WebhookPort int `mapstructure:"webhookPort"`
ServerIP string `mapstructure:"serverIP"`
Version string `mapstructure:"version"`
MirrorHostNodes bool `mapstructure:"mirrorHostNodes"`
}
func (c *config) validate() error {
@@ -86,16 +35,3 @@ func (c *config) validate() error {
return nil
}
func (c *config) parse(path string) error {
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
b, err := os.ReadFile(path)
if err != nil {
return err
}
return c.unmarshalYAML(b)
}
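The yaml tags and the hand-rolled unmarshalYAML/parse helpers are gone because configuration is now decoded through viper (already required in go.mod), which reads the mapstructure tags instead. A minimal sketch of that pattern in the same package — loadConfig is a hypothetical helper, not code from this change set; the real wiring lives in InitializeConfig in the cobra root command further down:

    // assumes the same package as the config struct above
    import "github.com/spf13/viper"

    // loadConfig reads a config file and decodes it into the mapstructure-tagged struct.
    func loadConfig(path string) (config, error) {
        var c config

        v := viper.New()
        v.SetConfigFile(path) // e.g. /etc/rancher/k3k/config.yaml

        if err := v.ReadInConfig(); err != nil {
            return c, err
        }

        // Unmarshal decodes the values via the mapstructure tags declared above.
        err := v.Unmarshal(&c)

        return c, err
    }

Flag and environment-variable overrides are bound separately on the cobra command, which is why the struct itself no longer needs the field-by-field merge logic.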


@@ -5,17 +5,21 @@ import (
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
const ConfigMapSyncerName = "configmap-syncer"
type ConfigMapSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
@@ -32,6 +36,10 @@ type ConfigMapSyncer struct {
objs sets.Set[types.NamespacedName]
}
func (c *ConfigMapSyncer) Name() string {
return ConfigMapSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !c.isWatching(req.NamespacedName) {
@@ -119,6 +127,7 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin
// lock in write mode since we are now adding the key
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
@@ -129,7 +138,6 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin
_, err := c.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
@@ -158,6 +166,7 @@ func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name st
}
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}


@@ -0,0 +1,169 @@
package controller_test
import (
"context"
"errors"
"os"
"path"
"path/filepath"
"testing"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Cluster Controller Suite")
}
type TestEnv struct {
*envtest.Environment
k8s *kubernetes.Clientset
k8sClient client.Client
}
var (
hostTestEnv *TestEnv
hostManager ctrl.Manager
virtTestEnv *TestEnv
virtManager ctrl.Manager
)
var _ = BeforeSuite(func() {
hostTestEnv = NewTestEnv()
By("HOST testEnv running at :" + hostTestEnv.ControlPlane.APIServer.Port)
virtTestEnv = NewTestEnv()
By("VIRT testEnv running at :" + virtTestEnv.ControlPlane.APIServer.Port)
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
ctrl.SetupSignalHandler()
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := hostTestEnv.Stop()
Expect(err).NotTo(HaveOccurred())
err = virtTestEnv.Stop()
Expect(err).NotTo(HaveOccurred())
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
err = os.RemoveAll(tmpKubebuilderDir)
Expect(err).NotTo(HaveOccurred())
})
func NewTestEnv() *TestEnv {
GinkgoHelper()
binaryAssetsDirectory := os.Getenv("KUBEBUILDER_ASSETS")
if binaryAssetsDirectory == "" {
binaryAssetsDirectory = "/usr/local/kubebuilder/bin"
}
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
if err := os.Mkdir(tmpKubebuilderDir, 0o755); !errors.Is(err, os.ErrExist) {
Expect(err).NotTo(HaveOccurred())
}
tempDir, err := os.MkdirTemp(tmpKubebuilderDir, "envtest-*")
Expect(err).NotTo(HaveOccurred())
err = os.CopyFS(tempDir, os.DirFS(binaryAssetsDirectory))
Expect(err).NotTo(HaveOccurred())
By("bootstrapping test environment")
testEnv := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
BinaryAssetsDirectory: tempDir,
Scheme: buildScheme(),
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
k8s, err := kubernetes.NewForConfig(cfg)
Expect(err).NotTo(HaveOccurred())
k8sClient, err := client.New(cfg, client.Options{Scheme: testEnv.Scheme})
Expect(err).NotTo(HaveOccurred())
return &TestEnv{
Environment: testEnv,
k8s: k8s,
k8sClient: k8sClient,
}
}
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
var _ = Describe("Kubelet Controller", func() {
var (
ctx context.Context
cancel context.CancelFunc
)
BeforeEach(func() {
var err error
ctx, cancel = context.WithCancel(context.Background())
hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{
// disable the metrics server
Metrics: metricsserver.Options{BindAddress: "0"},
Scheme: hostTestEnv.Scheme,
})
Expect(err).NotTo(HaveOccurred())
virtManager, err = ctrl.NewManager(virtTestEnv.Config, ctrl.Options{
// disable the metrics server
Metrics: metricsserver.Options{BindAddress: "0"},
Scheme: virtTestEnv.Scheme,
})
Expect(err).NotTo(HaveOccurred())
go func() {
defer GinkgoRecover()
err := hostManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
}()
go func() {
defer GinkgoRecover()
err := virtManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
}()
})
AfterEach(func() {
cancel()
})
Describe("PriorityClass", PriorityClassTests)
})


@@ -5,15 +5,17 @@ import (
"fmt"
"sync"
"github.com/rancher/k3k/k3k-kubelet/translate"
k3klog "github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
k3klog "github.com/rancher/k3k/pkg/log"
)
type ControllerHandler struct {
@@ -39,6 +41,7 @@ type ControllerHandler struct {
// be altered through the Add and Remove methods
type updateableReconciler interface {
reconcile.Reconciler
Name() string
AddResource(ctx context.Context, namespace string, name string) error
RemoveResource(ctx context.Context, namespace string, name string) error
}
@@ -50,6 +53,7 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
if controllers != nil {
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
c.RUnlock()
return err
@@ -97,14 +101,15 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
}
err := ctrl.NewControllerManagedBy(c.Mgr).
Named(r.Name()).
For(&v1.ConfigMap{}).
Complete(r)
if err != nil {
return fmt.Errorf("unable to start configmap controller: %w", err)
}
c.Lock()
if c.controllers == nil {
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
}
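For reference, the contract the handler drives controllers through is the updateableReconciler interface above: reconcile.Reconciler plus Name, AddResource and RemoveResource. The skeleton below is purely illustrative — exampleSyncer is hypothetical (the real implementations are the ConfigMapSyncer and SecretSyncer elsewhere in this diff) and it assumes imports for context, sync, k8s.io/apimachinery/pkg/types, k8s.io/apimachinery/pkg/util/sets and sigs.k8s.io/controller-runtime/pkg/reconcile:

    // exampleSyncer is a minimal sketch of an updateableReconciler.
    type exampleSyncer struct {
        mutex sync.RWMutex
        objs  sets.Set[types.NamespacedName]
    }

    // Name feeds the Named(r.Name()) call used when the handler builds the controller.
    func (e *exampleSyncer) Name() string { return "example-syncer" }

    func (e *exampleSyncer) AddResource(ctx context.Context, namespace, name string) error {
        e.mutex.Lock()
        defer e.mutex.Unlock()

        if e.objs == nil {
            e.objs = sets.Set[types.NamespacedName]{}
        }

        e.objs.Insert(types.NamespacedName{Namespace: namespace, Name: name})

        return nil
    }

    func (e *exampleSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
        e.mutex.Lock()
        defer e.mutex.Unlock()

        e.objs.Delete(types.NamespacedName{Namespace: namespace, Name: name})

        return nil
    }

    func (e *exampleSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
        // a real implementation copies req.NamespacedName from the virtual cluster to the host
        return reconcile.Result{}, nil
    }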


@@ -3,19 +3,19 @@ package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
const (
@@ -24,44 +24,44 @@ const (
)
type PVCReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PVCReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(pvcController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(pvcController).
For(&v1.PersistentVolumeClaim{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPVC v1.PersistentVolumeClaim
@@ -72,7 +72,6 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
return reconcile.Result{}, err
}
// handling persistent volume sync
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}


@@ -3,19 +3,19 @@ package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
const (
@@ -24,44 +24,44 @@ const (
)
type PodReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPodPVCController adds pod controller to k3k-kubelet
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PodReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(podController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(podController).
For(&v1.Pod{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPod v1.Pod
@@ -72,7 +72,6 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
return reconcile.Result{}, err
}
// handling pod
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -95,6 +94,7 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
// and then created on the host, the PV is not synced to the host cluster.
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
ctx = ctrl.LoggerInto(ctx, log)
var pvc v1.PersistentVolumeClaim


@@ -0,0 +1,229 @@
package controller_test
import (
"context"
"fmt"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var PriorityClassTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
)
BeforeEach(func() {
ctx := context.Background()
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
}
err := hostTestEnv.k8sClient.Create(ctx, &ns)
Expect(err).NotTo(HaveOccurred())
namespace = ns.Name
cluster = v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
err = controller.AddPriorityClassReconciler(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
Expect(err).NotTo(HaveOccurred())
})
It("creates a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
Labels: map[string]string{
"foo": "bar",
},
},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.Labels).To(ContainElement("bar"))
GinkgoWriter.Printf("labels: %v\n", hostPriorityClass.Labels)
})
It("updates a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.Labels).NotTo(ContainElement("bar"))
key := client.ObjectKeyFromObject(priorityClass)
err = virtTestEnv.k8sClient.Get(ctx, key, priorityClass)
Expect(err).NotTo(HaveOccurred())
priorityClass.Labels = map[string]string{"foo": "bar"}
// update virtual priorityClass
err = virtTestEnv.k8sClient.Update(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
Expect(priorityClass.Labels).To(ContainElement("bar"))
// check hostPriorityClass
Eventually(func() map[string]string {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
Expect(err).NotTo(HaveOccurred())
return hostPriorityClass.Labels
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(ContainElement("bar"))
})
It("deletes a priorityClass on the host cluster", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
err = virtTestEnv.k8sClient.Delete(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeTrue())
})
It("creates a priorityClass on the host cluster with the globalDefault annotation", func() {
ctx := context.Background()
priorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
Value: 1001,
GlobalDefault: true,
}
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostPriorityClassName}
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
By(fmt.Sprintf("Created priorityClass %s in host cluster without the GlobalDefault value", hostPriorityClassName))
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
Expect(hostPriorityClass.Annotations[controller.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
})
}
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
return translator.TranslateName(namespace, name)
}


@@ -0,0 +1,159 @@
package controller
import (
"context"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
const (
PriorityClassGlobalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"
priorityClassControllerName = "priorityclass-syncer-controller"
priorityClassFinalizerName = "priorityclass.k3k.io/finalizer"
)
type PriorityClassReconciler struct {
clusterName string
clusterNamespace string
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddPriorityClassReconciler adds a PriorityClass reconciler to k3k-kubelet
func AddPriorityClassReconciler(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PriorityClassReconciler{
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
name := translator.TranslateName("", priorityClassControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&schedulingv1.PriorityClass{}).
WithEventFilter(ignoreSystemPrefixPredicate).
Complete(&reconciler)
}
// IgnoreSystemPrefixPredicate filters out resources whose names start with "system-".
var ignoreSystemPrefixPredicate = predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return !strings.HasPrefix(e.ObjectOld.GetName(), "system-")
},
CreateFunc: func(e event.CreateEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
DeleteFunc: func(e event.DeleteEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
GenericFunc: func(e event.GenericEvent) bool {
return !strings.HasPrefix(e.Object.GetName(), "system-")
},
}
func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
priorityClass schedulingv1.PriorityClass
cluster v1alpha1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.virtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
hostPriorityClass := r.translatePriorityClass(priorityClass)
// handle deletion
if !priorityClass.DeletionTimestamp.IsZero() {
// deleting the synced service if exists
// TODO add test for previous implementation without err != nil check, and also check the other controllers
if err := r.hostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced service
if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
return reconcile.Result{}, err
}
}
// create the priorityClass on the host
log.Info("creating the priorityClass for the first time on the host cluster")
err := r.hostClient.Create(ctx, hostPriorityClass)
if err != nil {
if !apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, err
}
return reconcile.Result{}, r.hostClient.Update(ctx, hostPriorityClass)
}
return reconcile.Result{}, nil
}
func (r *PriorityClassReconciler) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
hostPriorityClass := priorityClass.DeepCopy()
r.Translator.TranslateTo(hostPriorityClass)
if hostPriorityClass.Annotations == nil {
hostPriorityClass.Annotations = make(map[string]string)
}
if hostPriorityClass.GlobalDefault {
hostPriorityClass.GlobalDefault = false
hostPriorityClass.Annotations[PriorityClassGlobalDefaultAnnotation] = "true"
}
return hostPriorityClass
}
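As a quick, hypothetical illustration of translatePriorityClass (invented values, same package, metav1 import assumed): a virtual PriorityClass marked globalDefault is recreated on the host with the flag cleared and recorded as an annotation instead, presumably so the host cluster's own default priority class is left untouched:

    r := &PriorityClassReconciler{
        Translator: translate.ToHostTranslator{ClusterName: "mycluster", ClusterNamespace: "ns"},
    }

    host := r.translatePriorityClass(schedulingv1.PriorityClass{
        ObjectMeta:    metav1.ObjectMeta{Name: "high-priority"}, // hypothetical name
        Value:         1001,
        GlobalDefault: true,
    })

    // host.Name carries the cluster-prefixed host-side name produced by the translator,
    // host.GlobalDefault is false, and
    // host.Annotations[PriorityClassGlobalDefaultAnnotation] == "true"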


@@ -5,17 +5,21 @@ import (
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
const SecretSyncerName = "secret-syncer"
type SecretSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
@@ -32,6 +36,10 @@ type SecretSyncer struct {
objs sets.Set[types.NamespacedName]
}
func (s *SecretSyncer) Name() string {
return SecretSyncerName
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !s.isWatching(req.NamespacedName) {
@@ -111,12 +119,15 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if s.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
@@ -127,7 +138,6 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
_, err := s.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
@@ -156,6 +166,7 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin
}
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
@@ -168,11 +179,11 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
var vSecret corev1.Secret
err := s.VirtualClient.Get(ctx, types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}, &vSecret)
if err != nil {
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
}


@@ -3,67 +3,64 @@ package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
const (
serviceSyncerController = "service-syncer-controller"
maxConcurrentReconciles = 1
serviceFinalizerName = "service.k3k.io/finalizer"
)
type ServiceReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translator translate.ToHostTranslator
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
Translator translate.ToHostTranslator
}
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
translator := translate.ToHostTranslator{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := ServiceReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(serviceSyncerController),
Translator: translator,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
Translator: translator,
}
return ctrl.NewControllerManagedBy(virtMgr).
Named(serviceSyncerController).
For(&v1.Service{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
return reconcile.Result{}, nil
@@ -71,27 +68,26 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
hostService v1.Service
cluster v1alpha1.Cluster
)
// getting the cluster for setting the controller reference
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := s.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtService.DeletionTimestamp.IsZero() {
// deleting the synced service if exists
if err := s.hostClient.Delete(ctx, syncedService); err != nil {
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
@@ -99,7 +95,7 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
@@ -111,15 +107,17 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
// create or update the service on host
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil {
var hostService v1.Service
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
@@ -127,7 +125,7 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("updating service on the host cluster")
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
}
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {


@@ -7,24 +7,25 @@ import (
"strconv"
"strings"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/manager"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
)
const (
webhookName = "podmutator.k3k.io"
webhookTimeout = int32(10)
webhookPort = "9443"
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
)
@@ -36,12 +37,13 @@ type webhookHandler struct {
clusterName string
clusterNamespace string
logger *log.Logger
webhookPort int
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any status fields of the downward apis env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -49,6 +51,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
serviceName: serviceName,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
webhookPort: webhookPort,
}
// create mutator webhook configuration to the cluster
@@ -99,9 +102,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
var (
webhookTLSSecret v1.Secret
)
var webhookTLSSecret v1.Secret
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
return nil, err
@@ -112,7 +113,7 @@ func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlrunti
return nil, errors.New("webhook CABundle does not exist in secret")
}
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{


@@ -8,9 +8,33 @@ import (
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/go-logr/zapr"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
certutil "github.com/rancher/dynamiclistener/cert"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
@@ -20,26 +44,6 @@ import (
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
@@ -71,7 +75,7 @@ type kubelet struct {
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
}
@@ -83,7 +87,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
if err != nil {
return nil, err
}
@@ -93,13 +97,23 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
if c.MirrorHostNodes {
hostMetricsBindAddress = "0"
virtualMetricsBindAddress = "0"
}
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
Scheme: baseScheme,
LeaderElection: true,
LeaderElectionNamespace: c.ClusterNamespace,
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8083",
BindAddress: hostMetricsBindAddress,
},
Cache: cache.Options{
DefaultNamespaces: map[string]cache.Config{
@@ -119,6 +133,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
webhookServer := webhook.NewServer(webhook.Options{
CertDir: "/opt/rancher/k3k-webhook",
Port: c.WebhookPort,
})
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
@@ -128,38 +143,43 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
LeaderElectionNamespace: "kube-system",
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8084",
BindAddress: virtualMetricsBindAddress,
},
})
if err != nil {
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
}
logger.Info("adding priorityclass controller")
if err := k3kkubeletcontroller.AddPriorityClassReconciler(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("failed to add priorityclass controller: " + err.Error())
}
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
if err != nil {
return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())
@@ -192,6 +212,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
logger: logger.Named(k3kKubeletName),
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
port: c.KubeletPort,
}, nil
}
@@ -210,9 +231,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
@@ -263,22 +284,22 @@ func (k *kubelet) start(ctx context.Context) {
k.logger.Info("node exited successfully")
}
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
if err != nil {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)
return utilProvider, &provider.Node{}, nil
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
mux := http.NewServeMux()
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
@@ -331,12 +352,11 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
b.ClientCA.Content,
b.ClientCAKey.Content,
)
if err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
url := "https://" + server.ServiceName(cluster.Name)
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
if err != nil {
@@ -390,12 +410,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
// POD IP
podIP := net.ParseIP(os.Getenv("POD_IP"))
ip := net.ParseIP(agentIP)
altNames := certutil.AltNames{
DNSNames: []string{hostname},
IPs: []net.IP{ip},
IPs: []net.IP{ip, podIP},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
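One hedged assumption about the POD_IP read above: the variable is presumably injected into the kubelet pod via the downward API, so the serving certificate's SAN list now covers both the externally supplied agent IP and the pod's own IP. Expressed with the core v1 types already imported in this file (v1 = k8s.io/api/core/v1), the expected wiring would look roughly like:

    // illustrative only — the actual env var injection lives in the deployment manifest
    env := v1.EnvVar{
        Name: "POD_IP",
        ValueFrom: &v1.EnvVarSource{
            FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.podIP"},
        },
    }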


@@ -2,14 +2,21 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"strings"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/pkg/log"
)
var (
@@ -20,119 +27,101 @@ var (
)
func main() {
app := cli.NewApp()
app.Name = "k3k-kubelet"
app.Usage = "virtual kubelet implementation k3k"
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "cluster-name",
Usage: "Name of the k3k cluster",
Destination: &cfg.ClusterName,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "cluster-namespace",
Usage: "Namespace of the k3k cluster",
Destination: &cfg.ClusterNamespace,
EnvVars: []string{"CLUSTER_NAMESPACE"},
},
&cli.StringFlag{
Name: "cluster-token",
Usage: "K3S token of the k3k cluster",
Destination: &cfg.Token,
EnvVars: []string{"CLUSTER_TOKEN"},
},
&cli.StringFlag{
Name: "host-config-path",
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
Destination: &cfg.HostConfigPath,
EnvVars: []string{"HOST_KUBECONFIG"},
},
&cli.StringFlag{
Name: "virtual-config-path",
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
Destination: &cfg.VirtualConfigPath,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "kubelet-port",
Usage: "kubelet API port number",
Destination: &cfg.KubeletPort,
EnvVars: []string{"SERVER_PORT"},
Value: "10250",
},
&cli.StringFlag{
Name: "service-name",
Usage: "The service name deployed by the k3k controller",
Destination: &cfg.ServiceName,
EnvVars: []string{"SERVICE_NAME"},
},
&cli.StringFlag{
Name: "agent-hostname",
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
Destination: &cfg.AgentHostname,
EnvVars: []string{"AGENT_HOSTNAME"},
},
&cli.StringFlag{
Name: "server-ip",
Usage: "Server IP used for registering the virtual kubelet to the cluster",
Destination: &cfg.ServerIP,
EnvVars: []string{"SERVER_IP"},
},
&cli.StringFlag{
Name: "version",
Usage: "Version of kubernetes server",
Destination: &cfg.Version,
EnvVars: []string{"VERSION"},
},
&cli.StringFlag{
Name: "config",
Usage: "Path to k3k-kubelet config file",
Destination: &configFile,
EnvVars: []string{"CONFIG_FILE"},
Value: "/etc/rancher/k3k/config.yaml",
},
&cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logging",
Destination: &debug,
EnvVars: []string{"DEBUG"},
rootCmd := &cobra.Command{
Use: "k3k-kubelet",
Short: "virtual kubelet implementation k3k",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := InitializeConfig(cmd); err != nil {
return err
}
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
return nil
},
RunE: run,
}
app.Before = func(clx *cli.Context) error {
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
return nil
}
app.Action = run
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
if err := app.Run(os.Args); err != nil {
if err := rootCmd.Execute(); err != nil {
logrus.Fatal(err)
}
}
func run(clx *cli.Context) error {
func run(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
}
if err := cfg.validate(); err != nil {
logger.Fatalw("failed to validate config", zap.Error(err))
return fmt.Errorf("failed to validate config: %w", err)
}
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
logger.Fatalw("failed to register new node", zap.Error(err))
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}
k.start(ctx)
return nil
}
// InitializeConfig sets up viper to read from config file, environment variables, and flags.
// It uses a `flatcase` convention for viper keys to match the (lowercased) config file keys,
// while flags remain in kebab-case.
func InitializeConfig(cmd *cobra.Command) error {
var err error
// Bind every cobra flag to a viper key.
// The viper key will be the flag name with dashes removed (flatcase).
// e.g. "cluster-name" becomes "clustername"
cmd.Flags().VisitAll(func(f *pflag.Flag) {
configName := strings.ReplaceAll(f.Name, "-", "")
envName := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
err = errors.Join(err, viper.BindPFlag(configName, f))
err = errors.Join(err, viper.BindEnv(configName, envName))
})
if err != nil {
return err
}
configFile = viper.GetString("config")
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
var notFoundErr viper.ConfigFileNotFoundError
if errors.As(err, &notFoundErr) || errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("no config file found: %w", err)
} else {
return fmt.Errorf("failed to read config file: %w", err)
}
}
// Unmarshal all configuration into the global cfg struct.
// Viper correctly handles the precedence of flags > env > config.
if err := viper.Unmarshal(&cfg); err != nil {
return fmt.Errorf("failed to unmarshal config: %w", err)
}
// Separately get the debug flag, as it's not part of the main config struct.
debug = viper.GetBool("debug")
return nil
}
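InitializeConfig above binds every flag under a flatcase viper key and a SCREAMING_SNAKE env var, then lets viper.Unmarshal resolve precedence (flags over env over config file). A small self-contained sketch of the same binding scheme, assuming a trimmed-down config struct whose field names are illustrative only; it relies on mapstructure's case-insensitive matching, so "clustername" fills ClusterName without struct tags.

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

// config mirrors a subset of the k3k-kubelet configuration for illustration.
type config struct {
	ClusterName string
	KubeletPort int
}

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ExitOnError)
	flags.String("cluster-name", "", "Name of the k3k cluster")
	flags.Int("kubelet-port", 10250, "kubelet API port number")

	v := viper.New()

	// Bind every flag under its flatcase key and an upper-snake env var,
	// the same scheme InitializeConfig uses above.
	flags.VisitAll(func(f *pflag.Flag) {
		key := strings.ReplaceAll(f.Name, "-", "")
		_ = v.BindPFlag(key, f)
		_ = v.BindEnv(key, strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_")))
	})

	_ = flags.Parse([]string{"--cluster-name=demo"})

	var cfg config
	if err := v.Unmarshal(&cfg); err != nil {
		panic(err)
	}

	// Flag values win over env vars, which win over config-file values.
	fmt.Printf("%+v\n", cfg)
}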

View File

@@ -10,8 +10,8 @@ package collectors
import (
"time"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// defining metrics
@@ -91,14 +91,20 @@ var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}
// DescribeWithStability implements compbasemetrics.StableCollector
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
ch <- nodeCPUUsageDesc
ch <- nodeMemoryUsageDesc
ch <- containerStartTimeDesc
ch <- containerCPUUsageDesc
ch <- containerMemoryUsageDesc
ch <- podCPUUsageDesc
ch <- podMemoryUsageDesc
ch <- resourceScrapeResultDesc
descs := []*compbasemetrics.Desc{
nodeCPUUsageDesc,
nodeMemoryUsageDesc,
containerStartTimeDesc,
containerCPUUsageDesc,
containerMemoryUsageDesc,
podCPUUsageDesc,
podMemoryUsageDesc,
resourceScrapeResultDesc,
}
for _, desc := range descs {
ch <- desc
}
}
// CollectWithStability implements compbasemetrics.StableCollector
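The Describe refactor above simply collects the descriptors in a slice and ranges over it. A compact sketch of the same pattern against the plain Prometheus client interface; the real collector uses the k8s.io/component-base/metrics stability wrappers, and the metric names and values here are placeholders.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// nodeCollector demonstrates the "gather descs in a slice, range to send"
// shape used by DescribeWithStability above.
type nodeCollector struct {
	cpuUsage *prometheus.Desc
	memUsage *prometheus.Desc
}

func newNodeCollector() *nodeCollector {
	return &nodeCollector{
		cpuUsage: prometheus.NewDesc("node_cpu_usage_seconds_total", "Cumulative CPU time", nil, nil),
		memUsage: prometheus.NewDesc("node_memory_working_set_bytes", "Working set memory", nil, nil),
	}
}

func (c *nodeCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range []*prometheus.Desc{c.cpuUsage, c.memUsage} {
		ch <- desc
	}
}

func (c *nodeCollector) Collect(ch chan<- prometheus.Metric) {
	// Placeholder values; the real collector reads a kubelet stats summary.
	ch <- prometheus.MustNewConstMetric(c.cpuUsage, prometheus.CounterValue, 123.4)
	ch <- prometheus.MustNewConstMetric(c.memUsage, prometheus.GaugeValue, 5.12e8)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(newNodeCollector())

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}

	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}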

View File

@@ -4,56 +4,71 @@ import (
"context"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
)
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: hostname,
},
{
Type: v1.NodeInternalIP,
Address: ip,
},
}
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
node.Status.NodeInfo.KubeProxyVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
}
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Fatal("error getting host node for mirroring", err)
}
}()
node.Spec = *hostNode.Spec.DeepCopy()
node.Status = *hostNode.Status.DeepCopy()
node.Labels = hostNode.GetLabels()
node.Annotations = hostNode.GetAnnotations()
node.Finalizers = hostNode.GetFinalizers()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
} else {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []corev1.NodeAddress{
{
Type: corev1.NodeHostName,
Address: hostname,
},
{
Type: corev1.NodeInternalIP,
Address: ip,
},
}
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
}
}
}()
}
}
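The mirror-host-nodes branch above copies the host node's spec, status, labels, annotations and finalizers onto the virtual node, keeping only the kubelet endpoint port local. A runnable sketch of those same steps against a fake clientset; mirrorNode is a name chosen for this sketch, and it returns an error where ConfigureNode calls logger.Fatal.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// mirrorNode copies the host node onto the virtual node object, overriding
// only the kubelet endpoint port, as the mirror branch above does.
func mirrorNode(ctx context.Context, client kubernetes.Interface, virtualNode *corev1.Node, kubeletPort int32) error {
	hostNode, err := client.CoreV1().Nodes().Get(ctx, virtualNode.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting host node for mirroring: %w", err)
	}

	virtualNode.Spec = *hostNode.Spec.DeepCopy()
	virtualNode.Status = *hostNode.Status.DeepCopy()
	virtualNode.Labels = hostNode.GetLabels()
	virtualNode.Annotations = hostNode.GetAnnotations()
	virtualNode.Finalizers = hostNode.GetFinalizers()

	// The virtual kubelet still serves its own API, so keep the local port.
	virtualNode.Status.DaemonEndpoints.KubeletEndpoint.Port = kubeletPort

	return nil
}

func main() {
	client := fake.NewSimpleClientset(&corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1", Labels: map[string]string{"kubernetes.io/os": "linux"}},
	})

	virtual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	if err := mirrorNode(context.Background(), client, virtual, 10250); err != nil {
		panic(err)
	}

	fmt.Println(virtual.Labels, virtual.Status.DaemonEndpoints.KubeletEndpoint.Port)
}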
// nodeConditions returns the basic conditions which mark the node as ready
func nodeConditions() []v1.NodeCondition {
return []v1.NodeCondition{
func nodeConditions() []corev1.NodeCondition {
return []corev1.NodeCondition{
{
Type: "Ready",
Status: v1.ConditionTrue,
Status: corev1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletReady",
@@ -61,7 +76,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "OutOfDisk",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientDisk",
@@ -69,7 +84,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "MemoryPressure",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientMemory",
@@ -77,7 +92,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "DiskPressure",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasNoDiskPressure",
@@ -85,7 +100,7 @@ func nodeConditions() []v1.NodeCondition {
},
{
Type: "NetworkUnavailable",
Status: v1.ConditionFalse,
Status: corev1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "RouteCreated",
@@ -96,9 +111,7 @@ func nodeConditions() []v1.NodeCondition {
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resources in the host nodes.
// If the nodeLabels are specified, only the matching nodes will be considered.
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
ctx := context.Background()
func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
if err != nil {
return err
@@ -117,7 +130,7 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
// If some node labels are specified only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
listOpts := metav1.ListOptions{}
if nodeLabels != nil {

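updateNodeCapacity above aggregates capacity and allocatable across the selected host nodes. The sketch below shows only that summation step, given a slice of nodes; the list call with a label selector is omitted, and the node values are made-up examples. Quantities must be accumulated with resource.Quantity.Add rather than plain arithmetic.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// sumNodeResources adds up Capacity and Allocatable across the given nodes,
// the same aggregation getResourcesFromNodes performs after listing them.
func sumNodeResources(nodes []corev1.Node) (capacity, allocatable corev1.ResourceList) {
	capacity = corev1.ResourceList{}
	allocatable = corev1.ResourceList{}

	for _, n := range nodes {
		for name, qty := range n.Status.Capacity {
			total := capacity[name]
			total.Add(qty)
			capacity[name] = total
		}

		for name, qty := range n.Status.Allocatable {
			total := allocatable[name]
			total.Add(qty)
			allocatable[name] = total
		}
	}

	return capacity, allocatable
}

func main() {
	nodes := []corev1.Node{
		{Status: corev1.NodeStatus{
			Capacity:    corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("8Gi")},
			Allocatable: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3800m"), corev1.ResourceMemory: resource.MustParse("7Gi")},
		}},
		{Status: corev1.NodeStatus{
			Capacity:    corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("4Gi")},
			Allocatable: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1900m"), corev1.ResourceMemory: resource.MustParse("3Gi")},
		}},
	}

	capacity, allocatable := sumNodeResources(nodes)
	fmt.Println(capacity.Cpu(), allocatable.Memory())
}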
View File

@@ -3,44 +3,48 @@ package provider
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/google/go-cmp/cmp"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"errors"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/transport/spdy"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
dto "github.com/prometheus/client_model/go"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -62,9 +66,7 @@ type Provider struct {
logger *k3klog.Logger
}
var (
ErrRetryTimeout = errors.New("provider timed out")
)
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
@@ -205,18 +207,18 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
}
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary, error) {
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
nodeList := &v1.NodeList{}
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
}
// fetch the stats from all the nodes
var (
nodeStats statsv1alpha1.NodeStats
allPodsStats []statsv1alpha1.PodStats
nodeStats stats.NodeStats
allPodsStats []stats.PodStats
)
for _, n := range nodeList.Items {
@@ -234,7 +236,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
)
}
stats := &statsv1alpha1.Summary{}
stats := &stats.Summary{}
if err := json.Unmarshal(res, stats); err != nil {
return nil, err
}
@@ -251,16 +253,16 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
return nil, err
}
podsNameMap := make(map[string]*v1.Pod)
podsNameMap := make(map[string]*corev1.Pod)
for _, pod := range pods {
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
podsNameMap[hostPodName] = pod
}
filteredStats := &statsv1alpha1.Summary{
filteredStats := &stats.Summary{
Node: nodeStats,
Pods: make([]statsv1alpha1.PodStats, 0),
Pods: make([]stats.PodStats, 0),
}
for _, podStat := range allPodsStats {
@@ -271,7 +273,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
// rewrite the PodReference to match the data of the virtual cluster
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
podStat.PodRef = statsv1alpha1.PodReference{
podStat.PodRef = stats.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
@@ -324,7 +326,6 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
// should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
// so more work is needed to detect a close and handle that appropriately.
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
if err != nil {
return err
}
@@ -365,15 +366,22 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
// setting the hostname for the pod if it's not set
if pod.Spec.Hostname == "" {
tPod.Spec.Hostname = pod.Name
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
}
// if the priorityCluss for the virtual cluster is set then override the provided value
// if the priorityClass for the virtual cluster is set then override the provided value
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
if tPod.Spec.PriorityClassName != "" {
tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
tPod.Spec.PriorityClassName = tPriorityClassName
}
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
}
}
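The priority-class handling above leaves "system-*" classes untouched (k3s schedules core-dns and local-path-provisioner with those), translates any other pod-level class to its host-cluster name, and lets a cluster-wide PriorityClass from the Cluster spec override both. A small pure-function sketch of that decision; the translate parameter stands in for Translator.TranslateName("", name), and the suffix produced in main is hypothetical.

package main

import (
	"fmt"
	"strings"
)

// hostPriorityClassName reproduces the branching in createPod above as a
// single pure function for readability.
func hostPriorityClassName(podClass, clusterClass string, translate func(string) string) string {
	if strings.HasPrefix(podClass, "system-") {
		return podClass
	}

	if clusterClass != "" {
		return clusterClass
	}

	if podClass != "" {
		return translate(podClass)
	}

	return ""
}

func main() {
	translate := func(name string) string { return name + "-mycluster-abc123" } // hypothetical host suffix

	fmt.Println(hostPriorityClassName("system-node-critical", "cluster-default", translate))
	fmt.Println(hostPriorityClassName("app-critical", "", translate))
	fmt.Println(hostPriorityClassName("app-critical", "cluster-default", translate))
}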
// fieldpath annotations
@@ -398,11 +406,16 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
return err
}
return p.HostClient.Create(ctx, tPod)
}
// withRetry retries the passed function with an interval and a timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
const (
interval = 2 * time.Second
timeout = 10 * time.Second
@@ -480,18 +493,22 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
}
source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
}
}
} else if volume.PersistentVolumeClaim != nil {
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
} else if volume.DownwardAPI != nil {
for _, downwardAPI := range volume.DownwardAPI.Items {
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}
if downwardAPI.FieldRef != nil {
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
}
}
}
}
@@ -556,7 +573,7 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.updatePod, pod)
}
func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Debugw("got a request for update pod")
// Once scheduled, a Pod cannot update fields other than the images of its containers and init containers, and a few others
@@ -564,11 +581,36 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
// Update Pod in the virtual cluster
var currentVirtualPod v1.Pod
var currentVirtualPod corev1.Pod
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
}
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
}
// Handle ephemeral containers
if !cmp.Equal(currentHostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
p.logger.Info("Updating ephemeral containers")
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
return err
}
return nil
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -584,17 +626,6 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
}
// Update Pod in the host cluster
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
}
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
@@ -602,6 +633,10 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
// in the virtual cluster we can update also the labels and annotations
maps.Copy(currentHostPod.Annotations, pod.Annotations)
maps.Copy(currentHostPod.Labels, pod.Labels)
if err := p.HostClient.Update(ctx, &currentHostPod); err != nil {
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
}
@@ -610,7 +645,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
}
// updateContainerImages will update the images of the original container images with the same name
func updateContainerImages(original, updated []v1.Container) []v1.Container {
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
newImages := make(map[string]string)
for _, c := range updated {
@@ -772,8 +807,8 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
selector = selector.Add(*requirement)
var podList corev1.PodList
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("unable to list pods: %w", err)
}
@@ -805,7 +840,7 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
})
// injecting cluster DNS IP to the pods except for coredns pod
if !strings.HasPrefix(podName, "coredns") {
if !strings.HasPrefix(podName, "coredns") && pod.Spec.DNSConfig == nil {
pod.Spec.DNSPolicy = corev1.DNSNone
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
Nameservers: []string{
@@ -816,17 +851,20 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
"svc.cluster.local",
"cluster.local",
},
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: ptr.To("5"),
},
},
}
}
updatedEnvVars := []corev1.EnvVar{
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
{Name: "KUBERNETES_SERVICE_PORT", Value: "6443"},
{Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: "6443"},
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":443"},
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":443"},
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
{Name: "KUBERNETES_PORT_443_TCP_PORT", Value: "6443"},
}
// inject networking information to the pod's environment variables
@@ -838,6 +876,11 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
}
// handle ephemeral containers as well
for i := range pod.Spec.EphemeralContainers {
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
}
}
// overrideEnvVars will override the orig environment variables if found in the updated list
@@ -891,45 +934,17 @@ func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
// configureFieldPathEnv retrieves all annotations created by the pod mutator webhook
// to assign env fieldpaths to pods. It also makes sure to change metadata.name and metadata.namespace to the
// assigned annotations.
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
for _, container := range pod.Spec.EphemeralContainers {
addFieldPathAnnotationToEnv(container.Env)
}
// override metadata.name and metadata.namespace with pod annotations
for i, container := range pod.Spec.InitContainers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.InitContainers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.MetadataNamespaceField)
pod.Spec.InitContainers[i].Env[j] = envVar
}
}
for _, container := range pod.Spec.InitContainers {
addFieldPathAnnotationToEnv(container.Env)
}
for i, container := range pod.Spec.Containers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
}
for _, container := range pod.Spec.Containers {
addFieldPathAnnotationToEnv(container.Env)
}
for name, value := range pod.Annotations {
@@ -939,10 +954,10 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
Name: envName,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: value,
},
},
@@ -954,3 +969,22 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return nil
}
func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
for j, envVar := range envVars {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
envVars[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
envVars[j] = envVar
}
}
}

View File

@@ -5,7 +5,6 @@ import (
"testing"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
func Test_overrideEnvVars(t *testing.T) {
@@ -22,42 +21,42 @@ func Test_overrideEnvVars(t *testing.T) {
{
name: "orig and new are empty",
args: args{
orig: []v1.EnvVar{},
new: []v1.EnvVar{},
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{},
},
want: []v1.EnvVar{},
want: []corev1.EnvVar{},
},
{
name: "only orig is empty",
args: args{
orig: []v1.EnvVar{},
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []v1.EnvVar{},
want: []corev1.EnvVar{},
},
{
name: "orig has a matching element",
args: args{
orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}},
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
orig: []corev1.EnvVar{{Name: "FOO", Value: "old_val"}},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig have multiple elements",
args: args{
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
},
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
},
{
name: "orig and new have multiple elements and some not matching",
args: args{
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
},
}
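The table-driven cases above pin down the merge semantics: entries already present in orig are overridden in place when a matching name exists in the new list, and entries that exist only in the new list are dropped. The function below is a reconstruction consistent with those cases, not a copy of the provider's implementation.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// overrideEnvVars is a sketch matching the behaviour asserted in the tests
// above: replace matching names in orig, never append new ones.
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
	byName := make(map[string]corev1.EnvVar, len(updated))
	for _, env := range updated {
		byName[env.Name] = env
	}

	for i, env := range orig {
		if replacement, ok := byName[env.Name]; ok {
			orig[i] = replacement
		}
	}

	return orig
}

func main() {
	orig := []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}}
	updated := []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}}

	fmt.Println(overrideEnvVars(orig, updated))
}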

View File

@@ -5,12 +5,14 @@ import (
"fmt"
"strings"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
const (
@@ -110,6 +112,7 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for i, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
break
}
}
// init containers
@@ -117,6 +120,17 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
break
}
}
}
// ephemeral containers
for i, container := range pod.Spec.EphemeralContainers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.EphemeralContainers[i].VolumeMounts = append(pod.Spec.EphemeralContainers[i].VolumeMounts[:j], pod.Spec.EphemeralContainers[i].VolumeMounts[j+1:]...)
break
}
}
}
@@ -125,13 +139,15 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...)
break
}
}
}
}
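The added break statements matter: splicing an element out of the slice you are ranging over shifts the remaining indices, so continuing the loop risks skipping or re-reading entries. A small helper illustrating the same remove-first-match-and-stop pattern for volume mounts; the helper name is chosen for this sketch, and the mount names are examples of the kube-api-access volumes Kubernetes injects automatically.

package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// dropMountWithPrefix removes the first volume mount whose name starts with
// the given prefix and returns immediately, mirroring the break added in
// removeKubeAccessVolume above.
func dropMountWithPrefix(mounts []corev1.VolumeMount, prefix string) []corev1.VolumeMount {
	for i, m := range mounts {
		if strings.HasPrefix(m.Name, prefix) {
			return append(mounts[:i], mounts[i+1:]...)
		}
	}

	return mounts
}

func main() {
	mounts := []corev1.VolumeMount{
		{Name: "kube-api-access-abcde", MountPath: "/var/run/secrets/kubernetes.io/serviceaccount"},
		{Name: "data", MountPath: "/data"},
	}

	fmt.Println(dropMountWithPrefix(mounts, "kube-api-access"))
}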
func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: tokenVolumeName,
VolumeSource: corev1.VolumeSource{

View File

@@ -2,10 +2,11 @@ package translate
import (
"encoding/hex"
"fmt"
"strings"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
@@ -99,14 +100,26 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
var names []string
// some resources are not namespaced (e.g. priorityclasses)
// for these resources we skip the namespace to avoid having a name like: priorityclass--cluster-123
if namespace == "" {
names = []string{name, t.ClusterName}
} else {
names = []string{name, namespace, t.ClusterName}
}
// we need to come up with a name which is:
// - somewhat connectable to the original resource
// - a valid k8s name
// - idempotently calculable
// - unique for this combination of name/namespace/cluster
namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName)
namePrefix := strings.Join(names, "-")
// use + as a separator since it can't be in an object name
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
nameKey := strings.Join(names, "+")
// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
nameSuffix := hex.EncodeToString([]byte(nameKey))
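TranslateName builds a readable "-"-joined prefix plus a hex-encoded "+"-joined key, so the result is unique per name/namespace/cluster yet still a valid object name. The sketch below approximates the scheme; the final truncation to 63 characters stands in for the project's SafeConcatName helper, whose exact trimming is not shown in the hunk above.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// hostName approximates the host-cluster naming scheme from TranslateName.
func hostName(name, namespace, cluster string) string {
	parts := []string{name, cluster}
	if namespace != "" {
		parts = []string{name, namespace, cluster}
	}

	prefix := strings.Join(parts, "-")
	key := strings.Join(parts, "+") // "+" cannot appear in an object name
	suffix := hex.EncodeToString([]byte(key))

	full := prefix + "-" + suffix
	if len(full) > 63 { // DNS label limit; the real helper trims more carefully
		full = full[:63]
	}

	return full
}

func main() {
	fmt.Println(hostName("web", "default", "mycluster"))
	fmt.Println(hostName("high-prio", "", "mycluster")) // cluster-scoped, e.g. a PriorityClass
}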

main.go
View File

@@ -1,28 +1,30 @@
//go:generate ./hack/update-codegen.sh
//go:generate ./scripts/generate
package main
import (
"context"
"errors"
"fmt"
"os"
"github.com/go-logr/zapr"
"github.com/spf13/cobra"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
@@ -31,41 +33,13 @@ var (
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
k3SImage string
k3SImagePullPolicy string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logger *log.Logger
flags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &kubeconfig,
},
&cli.StringFlag{
Name: "cluster-cidr",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
Destination: &clusterCIDR,
},
&cli.StringFlag{
Name: "shared-agent-image",
EnvVars: []string{"SHARED_AGENT_IMAGE"},
Usage: "K3K Virtual Kubelet image",
Value: "rancher/k3k:latest",
Destination: &sharedAgentImage,
},
&cli.StringFlag{
Name: "shared-agent-pull-policy",
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
Destination: &sharedAgentImagePullPolicy,
},
&cli.BoolFlag{
Name: "debug",
EnvVars: []string{"DEBUG"},
Usage: "Debug level logging",
Destination: &debug,
},
}
)
func init() {
@@ -74,26 +48,37 @@ func init() {
}
func main() {
app := cmds.NewApp()
app.Flags = flags
app.Action = run
app.Version = buildinfo.Version
app.Before = func(clx *cli.Context) error {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
rootCmd := &cobra.Command{
Use: "k3k",
Short: "k3k controller",
Version: buildinfo.Version,
PreRunE: func(cmd *cobra.Command, args []string) error {
return validate()
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = log.New(debug)
},
RunE: run,
}
if err := app.Run(os.Args); err != nil {
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&clusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&sharedAgentImage, "shared-agent-image", "", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&sharedAgentImagePullPolicy, "shared-agent-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&k3SImage, "k3s-image", "rancher/k3k", "K3K server image")
rootCmd.PersistentFlags().StringVar(&k3SImagePullPolicy, "k3s-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
if err := rootCmd.Execute(); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
}
}
func run(clx *cli.Context) error {
func run(cmd *cobra.Command, args []string) error {
ctx := context.Background()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
@@ -106,7 +91,6 @@ func run(clx *cli.Context) error {
mgr, err := ctrl.NewManager(restConfig, manager.Options{
Scheme: scheme,
})
if err != nil {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
@@ -115,28 +99,30 @@ func run(clx *cli.Context) error {
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
if err != nil {
return err
}
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
if err := mgr.Add(runnable); err != nil {
return err
}
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
if err := cluster.AddPodController(ctx, mgr); err != nil {
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding clusterset controller")
logger.Info("adding clusterpolicy controller")
if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterset controller: %v", err)
}
if clusterCIDR == "" {
logger.Info("adding networkpolicy node controller")
if err := clusterset.AddNodeController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
}
if err := policy.Add(mgr, clusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
}
if err := mgr.Start(ctx); err != nil {

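The refactored run wires a controller-runtime manager, adds the port-allocator runnable, and registers the cluster, pod and policy controllers. Below is only the manager skeleton under stated assumptions: the k3k-specific Add functions and the real port allocator are project internals and omitted, and the runnableFunc adapter is a name introduced for this sketch.

package main

import (
	"context"
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// runnableFunc adapts a plain function to manager.Runnable, the same shape
// used to hand the port allocator's config runnable to mgr.Add.
type runnableFunc func(ctx context.Context) error

func (f runnableFunc) Start(ctx context.Context) error { return f(ctx) }

func main() {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{Scheme: scheme})
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to create manager:", err)
		os.Exit(1)
	}

	// Background task started alongside the controllers, in the spirit of
	// the port allocator runnable added in run() above.
	if err := mgr.Add(runnableFunc(func(ctx context.Context) error {
		<-ctx.Done() // placeholder: real runnables do work until the context is cancelled
		return nil
	})); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		fmt.Fprintln(os.Stderr, "manager exited:", err)
		os.Exit(1)
	}
}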
View File

@@ -1,5 +1,3 @@
package k3k
var (
GroupName = "k3k.io"
)
var GroupName = "k3k.io"

View File

@@ -1,10 +1,12 @@
package v1alpha1
import (
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
)
var (
@@ -21,8 +23,8 @@ func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{},
&ClusterSet{},
&ClusterSetList{},
&VirtualClusterPolicy{},
&VirtualClusterPolicyList{},
)
metav1.AddToGroupVersion(s, SchemeGroupVersion)

View File

@@ -10,6 +10,9 @@ import (
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.mode",name=Mode,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.policyName",name=Policy,type=string
// Cluster defines a virtual Kubernetes cluster managed by k3k.
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
@@ -26,6 +29,7 @@ type Cluster struct {
// Status reflects the observed state of the Cluster.
//
// +kubebuilder:default={}
// +optional
Status ClusterStatus `json:"status,omitempty"`
}
@@ -37,7 +41,7 @@ type ClusterSpec struct {
// If not specified, the Kubernetes version of the host node will be used.
//
// +optional
Version string `json:"version"`
Version string `json:"version,omitempty"`
// Mode specifies the cluster provisioning mode: "shared" or "virtual".
// Defaults to "shared". This field is immutable.
@@ -93,8 +97,8 @@ type ClusterSpec struct {
// Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
// A default StorageClass is required for dynamic persistence.
//
// +kubebuilder:default={type: "dynamic"}
Persistence PersistenceConfig `json:"persistence,omitempty"`
// +optional
Persistence PersistenceConfig `json:"persistence"`
// Expose specifies options for exposing the API server.
// By default, it's only exposed as a ClusterIP.
@@ -114,16 +118,11 @@ type ClusterSpec struct {
// +optional
PriorityClass string `json:"priorityClass,omitempty"`
// Limit defines resource limits for server/agent nodes.
//
// +optional
Limit *ClusterLimit `json:"clusterLimit,omitempty"`
// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
// The Secret must have a "token" field in its data.
//
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef,omitempty"`
// TLSSANs specifies subject alternative names for the K3s server certificate.
//
@@ -142,10 +141,41 @@ type ClusterSpec struct {
// +optional
AgentArgs []string `json:"agentArgs,omitempty"`
// ServerEnvs specifies a list of environment variables to set in the server pod.
//
// +optional
ServerEnvs []v1.EnvVar `json:"serverEnvs,omitempty"`
// AgentEnvs specifies a list of environment variables to set in the agent pod.
//
// +optional
AgentEnvs []v1.EnvVar `json:"agentEnvs,omitempty"`
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
//
// +optional
Addons []Addon `json:"addons,omitempty"`
// ServerLimit specifies resource limits for server nodes.
//
// +optional
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
// WorkerLimit specifies resource limits for agent nodes.
//
// +optional
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
// MirrorHostNodes controls whether node objects from the host cluster
// are mirrored into the virtual cluster.
//
// +optional
MirrorHostNodes bool `json:"mirrorHostNodes,omitempty"`
// CustomCAs specifies the cert/key pairs for custom CA certificates.
//
// +optional
CustomCAs CustomCAs `json:"customCAs,omitempty"`
}
// ClusterMode is the possible provisioning mode of a Cluster.
@@ -175,15 +205,6 @@ const (
DynamicPersistenceMode = PersistenceMode("dynamic")
)
// ClusterLimit defines resource limits for server and agent nodes.
type ClusterLimit struct {
// ServerLimit specifies resource limits for server nodes.
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
// WorkerLimit specifies resource limits for agent nodes.
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}
// Addon specifies a Secret containing YAML to be deployed on cluster startup.
type Addon struct {
// SecretNamespace is the namespace of the Secret.
@@ -198,7 +219,7 @@ type PersistenceConfig struct {
// Type specifies the persistence mode.
//
// +kubebuilder:default="dynamic"
Type PersistenceMode `json:"type"`
Type PersistenceMode `json:"type,omitempty"`
// StorageClassName is the name of the StorageClass to use for the PVC.
// This field is only relevant in "dynamic" mode.
@@ -209,6 +230,7 @@ type PersistenceConfig struct {
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="1G"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
@@ -245,29 +267,81 @@ type IngressConfig struct {
}
// LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
type LoadBalancerConfig struct{}
// NodePortConfig specifies options for exposing the API server through NodePort.
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
type LoadBalancerConfig struct {
// ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.
// If not specified, the default https 443 port will be allocated.
// If 0 or negative, the port will not be exposed.
//
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`
// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
//
// +optional
ServicePort *int32 `json:"servicePort,omitempty"`
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
// ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.
// If not specified, the default etcd 2379 port will be allocated.
// If 0 or negative, the port will not be exposed.
//
// +optional
ETCDPort *int32 `json:"etcdPort,omitempty"`
}
// NodePortConfig specifies options for exposing the API server through NodePort.
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.
// If not specified, a random port between 30000-32767 will be allocated.
// If out of range, the port will not be exposed.
//
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
// If not specified, a random port between 30000-32767 will be allocated.
// If out of range, the port will not be exposed.
//
// +optional
ETCDPort *int32 `json:"etcdPort,omitempty"`
}
// CustomCAs specifies the cert/key pairs for custom CA certificates.
type CustomCAs struct {
// Enabled toggles this feature on or off.
Enabled bool `json:"enabled,omitempty"`
// Sources defines the sources for all required custom CA certificates.
Sources CredentialSources `json:"sources,omitempty"`
}
// CredentialSources lists all the required credentials, including both
// TLS key pairs and single signing keys.
type CredentialSources struct {
// ServerCA specifies the server-ca cert/key pair.
ServerCA CredentialSource `json:"serverCA,omitempty"`
// ClientCA specifies the client-ca cert/key pair.
ClientCA CredentialSource `json:"clientCA,omitempty"`
// RequestHeaderCA specifies the request-header-ca cert/key pair.
RequestHeaderCA CredentialSource `json:"requestHeaderCA,omitempty"`
// ETCDServerCA specifies the etcd-server-ca cert/key pair.
ETCDServerCA CredentialSource `json:"etcdServerCA,omitempty"`
// ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
ETCDPeerCA CredentialSource `json:"etcdPeerCA,omitempty"`
// ServiceAccountToken specifies the service-account-token key.
ServiceAccountToken CredentialSource `json:"serviceAccountToken,omitempty"`
}
// CredentialSource defines where to get a credential from.
// It can represent either a TLS key pair or a single private key.
type CredentialSource struct {
// SecretName specifies the name of an existing secret to use.
// The controller expects specific keys inside based on the credential type:
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
// - For ServiceAccountTokenKey: 'tls.key'.
// +optional
SecretName string `json:"secretName,omitempty"`
}
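For orientation, the new spec fields above compose like this when building a Cluster spec in Go. The secret names are placeholders, only the fields shown in the types above are set, and the object is printed rather than applied.

package main

import (
	"fmt"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func main() {
	spec := v1alpha1.ClusterSpec{
		// Mirror real node objects from the host cluster into the virtual cluster.
		MirrorHostNodes: true,

		// Bring your own CA material from pre-created secrets.
		CustomCAs: v1alpha1.CustomCAs{
			Enabled: true,
			Sources: v1alpha1.CredentialSources{
				ServerCA:            v1alpha1.CredentialSource{SecretName: "demo-server-ca"},
				ClientCA:            v1alpha1.CredentialSource{SecretName: "demo-client-ca"},
				ServiceAccountToken: v1alpha1.CredentialSource{SecretName: "demo-sa-token"},
			},
		},
	}

	fmt.Printf("%+v\n", spec)
}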
// ClusterStatus reflects the observed state of a Cluster.
type ClusterStatus struct {
// HostVersion is the Kubernetes version of the host node.
@@ -295,12 +369,46 @@ type ClusterStatus struct {
// +optional
TLSSANs []string `json:"tlsSANs,omitempty"`
// Persistence specifies options for persisting etcd data.
// PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
//
// +optional
Persistence PersistenceConfig `json:"persistence,omitempty"`
PolicyName string `json:"policyName,omitempty"`
// KubeletPort specifies the port used by k3k-kubelet in shared mode.
//
// +optional
KubeletPort int `json:"kubeletPort,omitempty"`
// WebhookPort specifies the port used by the webhook in k3k-kubelet in shared mode.
//
// +optional
WebhookPort int `json:"webhookPort,omitempty"`
// Conditions are the individual conditions for the cluster.
//
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// Phase is a high-level summary of the cluster's current lifecycle state.
//
// +kubebuilder:default="Unknown"
// +kubebuilder:validation:Enum=Pending;Provisioning;Ready;Failed;Terminating;Unknown
// +optional
Phase ClusterPhase `json:"phase,omitempty"`
}
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
type ClusterPhase string
const (
ClusterPending = ClusterPhase("Pending")
ClusterProvisioning = ClusterPhase("Provisioning")
ClusterReady = ClusterPhase("Ready")
ClusterFailed = ClusterPhase("Failed")
ClusterTerminating = ClusterPhase("Terminating")
ClusterUnknown = ClusterPhase("Unknown")
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
@@ -317,54 +425,55 @@ type ClusterList struct {
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:JSONPath=".spec.allowedMode",name=Mode,type=string
// +kubebuilder:resource:scope=Cluster,shortName=vcp
// ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
// It allows defining common configurations and constraints for the clusters within the set.
type ClusterSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// VirtualClusterPolicy allows defining common configurations and constraints
// for clusters within a clusterpolicy.
type VirtualClusterPolicy struct {
metav1.ObjectMeta `json:"metadata"`
metav1.TypeMeta `json:",inline"`
// Spec defines the desired state of the ClusterSet.
// Spec defines the desired state of the VirtualClusterPolicy.
//
// +kubebuilder:default={}
Spec ClusterSetSpec `json:"spec"`
Spec VirtualClusterPolicySpec `json:"spec"`
// Status reflects the observed state of the ClusterSet.
// Status reflects the observed state of the VirtualClusterPolicy.
//
// +optional
Status ClusterSetStatus `json:"status,omitempty"`
Status VirtualClusterPolicyStatus `json:"status"`
}
// ClusterSetSpec defines the desired state of a ClusterSet.
type ClusterSetSpec struct {
// DefaultLimits specifies the default resource limits for servers/agents when a cluster in the set doesn't provide any.
// VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
type VirtualClusterPolicySpec struct {
// Quota specifies the resource limits for clusters within a clusterpolicy.
//
// +optional
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
// Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
// to set defaults and constraints (min/max)
//
// +optional
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
//
// +optional
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace.
//
// +optional
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
// MaxLimits specifies the maximum resource limits that apply to all clusters (server + agent) in the set.
// AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
//
// +optional
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
// AllowedNodeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
//
// +kubebuilder:default={shared}
// +kubebuilder:default=shared
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +kubebuilder:validation:MinItems=1
// +optional
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
AllowedMode ClusterMode `json:"allowedMode,omitempty"`
// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
//
@@ -393,8 +502,8 @@ const (
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
)
// ClusterSetStatus reflects the observed state of a ClusterSet.
type ClusterSetStatus struct {
// VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
type VirtualClusterPolicyStatus struct {
// ObservedGeneration was the generation at the time the status was updated.
//
// +optional
@@ -421,10 +530,10 @@ type ClusterSetStatus struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// ClusterSetList is a list of ClusterSet resources.
type ClusterSetList struct {
// VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
type VirtualClusterPolicyList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []ClusterSet `json:"items"`
Items []VirtualClusterPolicy `json:"items"`
}
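For orientation, the sketch below (not part of this changeset) shows how the renamed resource could be constructed in Go using the new spec fields; the policy name, quota values, and node selector are invented for illustration, and the SharedClusterMode constant is assumed from its use elsewhere in this diff.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func main() {
    // A cluster-scoped policy that only allows shared-mode clusters and caps
    // the resources of the namespaces bound to it. All values are examples.
    policy := v1alpha1.VirtualClusterPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "example-policy"},
        Spec: v1alpha1.VirtualClusterPolicySpec{
            AllowedMode: v1alpha1.SharedClusterMode,
            Quota: &v1.ResourceQuotaSpec{
                Hard: v1.ResourceList{
                    v1.ResourceCPU:    resource.MustParse("4"),
                    v1.ResourceMemory: resource.MustParse("8Gi"),
                },
            },
            DefaultNodeSelector: map[string]string{"kubernetes.io/os": "linux"},
        },
    }

    fmt.Printf("policy %s allows mode %q\n", policy.Name, policy.Spec.AllowedMode)
}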

View File

@@ -1,20 +1,18 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
@@ -34,7 +32,6 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
@@ -55,36 +52,6 @@ func (in *Cluster) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLimit) DeepCopyInto(out *ClusterLimit) {
*out = *in
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLimit.
func (in *ClusterLimit) DeepCopy() *ClusterLimit {
if in == nil {
return nil
}
out := new(ClusterLimit)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
@@ -97,7 +64,6 @@ func (in *ClusterList) DeepCopyInto(out *ClusterList) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
@@ -118,135 +84,6 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSet) DeepCopyInto(out *ClusterSet) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet.
func (in *ClusterSet) DeepCopy() *ClusterSet {
if in == nil {
return nil
}
out := new(ClusterSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList.
func (in *ClusterSetList) DeepCopy() *ClusterSetList {
if in == nil {
return nil
}
out := new(ClusterSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
*out = *in
if in.MaxLimits != nil {
in, out := &in.MaxLimits, &out.MaxLimits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.DefaultLimits != nil {
in, out := &in.DefaultLimits, &out.DefaultLimits
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AllowedNodeTypes != nil {
in, out := &in.AllowedNodeTypes, &out.AllowedNodeTypes
*out = make([]ClusterMode, len(*in))
copy(*out, *in)
}
if in.PodSecurityAdmissionLevel != nil {
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec.
func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec {
if in == nil {
return nil
}
out := new(ClusterSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus.
func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus {
if in == nil {
return nil
}
out := new(ClusterSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = *in
@@ -260,6 +97,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(int32)
**out = **in
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -267,16 +110,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.TokenSecretRef != nil {
in, out := &in.TokenSecretRef, &out.TokenSecretRef
*out = new(v1.SecretReference)
**out = **in
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -287,23 +130,40 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
if in.ServerEnvs != nil {
in, out := &in.ServerEnvs, &out.ServerEnvs
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AgentEnvs != nil {
in, out := &in.AgentEnvs, &out.AgentEnvs
*out = make([]v1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
out.CustomCAs = in.CustomCAs
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -324,8 +184,13 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
return
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -338,6 +203,58 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSource.
func (in *CredentialSource) DeepCopy() *CredentialSource {
if in == nil {
return nil
}
out := new(CredentialSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSources) DeepCopyInto(out *CredentialSources) {
*out = *in
out.ServerCA = in.ServerCA
out.ClientCA = in.ClientCA
out.RequestHeaderCA = in.RequestHeaderCA
out.ETCDServerCA = in.ETCDServerCA
out.ETCDPeerCA = in.ETCDPeerCA
out.ServiceAccountToken = in.ServiceAccountToken
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSources.
func (in *CredentialSources) DeepCopy() *CredentialSources {
if in == nil {
return nil
}
out := new(CredentialSources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCAs) DeepCopyInto(out *CustomCAs) {
*out = *in
out.Sources = in.Sources
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCAs.
func (in *CustomCAs) DeepCopy() *CustomCAs {
if in == nil {
return nil
}
out := new(CustomCAs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
*out = *in
@@ -349,14 +266,13 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
if in.LoadBalancer != nil {
in, out := &in.LoadBalancer, &out.LoadBalancer
*out = new(LoadBalancerConfig)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposeConfig.
@@ -379,7 +295,6 @@ func (in *IngressConfig) DeepCopyInto(out *IngressConfig) {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfig.
@@ -395,7 +310,16 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
*out = *in
return
if in.ServerPort != nil {
in, out := &in.ServerPort, &out.ServerPort
*out = new(int32)
**out = **in
}
if in.ETCDPort != nil {
in, out := &in.ETCDPort, &out.ETCDPort
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfig.
@@ -416,17 +340,11 @@ func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
*out = new(int32)
**out = **in
}
if in.ServicePort != nil {
in, out := &in.ServicePort, &out.ServicePort
*out = new(int32)
**out = **in
}
if in.ETCDPort != nil {
in, out := &in.ETCDPort, &out.ETCDPort
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig.
@@ -447,7 +365,6 @@ func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
@@ -459,3 +376,121 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicy.
func (in *VirtualClusterPolicy) DeepCopy() *VirtualClusterPolicy {
if in == nil {
return nil
}
out := new(VirtualClusterPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualClusterPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicyList) DeepCopyInto(out *VirtualClusterPolicyList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VirtualClusterPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicyList.
func (in *VirtualClusterPolicyList) DeepCopy() *VirtualClusterPolicyList {
if in == nil {
return nil
}
out := new(VirtualClusterPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualClusterPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec) {
*out = *in
if in.Quota != nil {
in, out := &in.Quota, &out.Quota
*out = new(v1.ResourceQuotaSpec)
(*in).DeepCopyInto(*out)
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(v1.LimitRangeSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.PodSecurityAdmissionLevel != nil {
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
*out = new(PodSecurityAdmissionLevel)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.
func (in *VirtualClusterPolicySpec) DeepCopy() *VirtualClusterPolicySpec {
if in == nil {
return nil
}
out := new(VirtualClusterPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualClusterPolicyStatus) DeepCopyInto(out *VirtualClusterPolicyStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicyStatus.
func (in *VirtualClusterPolicyStatus) DeepCopy() *VirtualClusterPolicyStatus {
if in == nil {
return nil
}
out := new(VirtualClusterPolicyStatus)
in.DeepCopyInto(out)
return out
}
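The CustomCAs, CredentialSources, and CredentialSource types that gained deepcopy functions above can be populated as in the following sketch (not part of this changeset); the field types are inferred from the generated code and the Secret names are placeholders.

package main

import (
    "fmt"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func main() {
    // Every CA source points at a pre-created Secret in the cluster's namespace;
    // the Secret names here are placeholders.
    customCAs := v1alpha1.CustomCAs{
        Enabled: true,
        Sources: v1alpha1.CredentialSources{
            ServerCA:            v1alpha1.CredentialSource{SecretName: "my-server-ca"},
            ClientCA:            v1alpha1.CredentialSource{SecretName: "my-client-ca"},
            RequestHeaderCA:     v1alpha1.CredentialSource{SecretName: "my-request-header-ca"},
            ETCDServerCA:        v1alpha1.CredentialSource{SecretName: "my-etcd-server-ca"},
            ETCDPeerCA:          v1alpha1.CredentialSource{SecretName: "my-etcd-peer-ca"},
            ServiceAccountToken: v1alpha1.CredentialSource{SecretName: "my-sa-signing-key"},
        },
    }

    fmt.Println("custom CAs enabled:", customCAs.Enabled)
}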

View File

@@ -4,12 +4,15 @@ import (
"context"
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
)
const (
@@ -41,14 +44,21 @@ func configSecretName(clusterName string) string {
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)
result, err := controllerutil.CreateOrUpdate(ctx, cfg.client, obj, func() error {
return controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme)
})
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
if result != controllerutil.OperationResultNone {
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key, "result", result)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
}
return err
if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
return cfg.client.Update(ctx, obj)
}
return err
}
return nil
}
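The reworked ensureObject switches from controllerutil.CreateOrUpdate to setting the owner reference and then creating the object, falling back to an update when it already exists. Below is a minimal sketch of that same create-then-update pattern applied to a plain ConfigMap (not part of this changeset; the object name and namespace are assumptions).

package main

import (
    "context"
    "log"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
    ctx := context.Background()

    c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
    if err != nil {
        log.Fatal(err)
    }

    cm := &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Name: "example-config", Namespace: "default"},
        Data:       map[string]string{"key": "value"},
    }

    // Try to create first; if the object already exists, fall back to an update,
    // mirroring the error handling in the new ensureObject.
    if err := c.Create(ctx, cm); err != nil {
        if !apierrors.IsAlreadyExists(err) {
            log.Fatal(err)
        }

        if err := c.Update(ctx, cm); err != nil {
            log.Fatal(err)
        }
    }
}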

View File

@@ -0,0 +1,253 @@
package agent
import (
"context"
"fmt"
"os"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
kubeletPortRangeConfigMapName = "k3k-kubelet-port-range"
webhookPortRangeConfigMapName = "k3k-webhook-port-range"
rangeKey = "range"
allocatedPortsKey = "allocatedPorts"
snapshotDataKey = "snapshotData"
)
type PortAllocator struct {
ctrlruntimeclient.Client
KubeletCM *v1.ConfigMap
WebhookCM *v1.ConfigMap
}
func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*PortAllocator, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("starting port allocator")
portRangeConfigMapNamespace := os.Getenv("CONTROLLER_NAMESPACE")
if portRangeConfigMapNamespace == "" {
return nil, fmt.Errorf("failed to find k3k controller namespace")
}
var kubeletPortRangeCM, webhookPortRangeCM v1.ConfigMap
kubeletPortRangeCM.Name = kubeletPortRangeConfigMapName
kubeletPortRangeCM.Namespace = portRangeConfigMapNamespace
webhookPortRangeCM.Name = webhookPortRangeConfigMapName
webhookPortRangeCM.Namespace = portRangeConfigMapNamespace
return &PortAllocator{
Client: client,
KubeletCM: &kubeletPortRangeCM,
WebhookCM: &webhookPortRangeCM,
}, nil
}
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange, webhookPortRange string) manager.Runnable {
return manager.RunnableFunc(func(ctx context.Context) error {
if err := a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange); err != nil {
return err
}
if err := a.getOrCreate(ctx, a.WebhookCM, webhookPortRange); err != nil {
return err
}
return nil
})
}
func (a *PortAllocator) getOrCreate(ctx context.Context, configmap *v1.ConfigMap, portRange string) error {
nn := types.NamespacedName{
Name: configmap.Name,
Namespace: configmap.Namespace,
}
if err := a.Get(ctx, nn, configmap); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
// creating the configMap for the first time
configmap.Data = map[string]string{
rangeKey: portRange,
allocatedPortsKey: "",
}
configmap.BinaryData = map[string][]byte{
snapshotDataKey: []byte(""),
}
if err := a.Create(ctx, configmap); err != nil {
return fmt.Errorf("failed to create port range configmap: %w", err)
}
}
return nil
}
func (a *PortAllocator) AllocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM)
}
func (a *PortAllocator) DeallocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string, webhookPort int) error {
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM, webhookPort)
}
func (a *PortAllocator) AllocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM)
}
func (a *PortAllocator) DeallocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string, kubeletPort int) error {
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM, kubeletPort)
}
// allocatePort will assign a port to the cluster from a port range configured for k3k
func (a *PortAllocator) allocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap) (int, error) {
portRange, ok := configMap.Data[rangeKey]
if !ok {
return 0, fmt.Errorf("port range is not initialized")
}
// get configMap first to avoid conflicts
if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
return 0, err
}
clusterNamespaceName := clusterNamespace + "/" + clusterName
portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
if err != nil {
return 0, err
}
if _, ok := portsMap[clusterNamespaceName]; ok {
return portsMap[clusterNamespaceName], nil
}
// allocate a new port and save the snapshot
snapshot := core.RangeAllocation{
Range: configMap.Data[rangeKey],
Data: configMap.BinaryData[snapshotDataKey],
}
pa, err := portallocator.NewFromSnapshot(&snapshot)
if err != nil {
return 0, err
}
next, err := pa.AllocateNext()
if err != nil {
return 0, err
}
portsMap[clusterNamespaceName] = next
if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
return 0, err
}
if err := a.Update(ctx, configMap); err != nil {
return 0, err
}
return next, nil
}
// deallocatePort will remove the port used by the cluster from the port range
func (a *PortAllocator) deallocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap, port int) error {
portRange, ok := configMap.Data[rangeKey]
if !ok {
return fmt.Errorf("port range is not initialized")
}
if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
return err
}
clusterNamespaceName := clusterNamespace + "/" + clusterName
portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
if err != nil {
return err
}
// check if the cluster already exists in the configMap
if usedPort, ok := portsMap[clusterNamespaceName]; ok {
if usedPort != port {
return fmt.Errorf("port %d does not match used port %d for the cluster", port, usedPort)
}
snapshot := core.RangeAllocation{
Range: configMap.Data[rangeKey],
Data: configMap.BinaryData[snapshotDataKey],
}
pa, err := portallocator.NewFromSnapshot(&snapshot)
if err != nil {
return err
}
if err := pa.Release(port); err != nil {
return err
}
delete(portsMap, clusterNamespaceName)
if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
return err
}
}
return a.Update(ctx, configMap)
}
// parsePortMap will convert ConfigMap Data to a portMap of string keys and values of ints
func parsePortMap(portMapData string) (map[string]int, error) {
portMap := make(map[string]int)
if err := yaml.Unmarshal([]byte(portMapData), &portMap); err != nil {
return nil, fmt.Errorf("failed to parse allocatedPorts: %w", err)
}
return portMap, nil
}
// serializePortMap will convert a portMap of string keys and values of ints to ConfigMap Data
func serializePortMap(m map[string]int) (string, error) {
out, err := yaml.Marshal(m)
if err != nil {
return "", fmt.Errorf("failed to serialize allocatedPorts: %w", err)
}
return string(out), nil
}
func saveSnapshot(portAllocator *portallocator.PortAllocator, snapshot *core.RangeAllocation, configMap *v1.ConfigMap, portsMap map[string]int) error {
// save the new snapshot
if err := portAllocator.Snapshot(snapshot); err != nil {
return err
}
// update the configmap with the new portsMap and the new snapshot
configMap.BinaryData[snapshotDataKey] = snapshot.Data
configMap.Data[rangeKey] = snapshot.Range
allocatedPortsData, err := serializePortMap(portsMap)
if err != nil {
return err
}
configMap.Data[allocatedPortsKey] = allocatedPortsData
return nil
}
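A sketch (not part of this changeset) of how the new PortAllocator might be wired into controller start-up; the port ranges are arbitrary examples and the manager options are left at their defaults.

package main

import (
    "log"

    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/rancher/k3k/pkg/controller/cluster/agent"
)

func main() {
    ctx := ctrl.SetupSignalHandler()

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
    if err != nil {
        log.Fatal(err)
    }

    // NewPortAllocator reads CONTROLLER_NAMESPACE to locate its ConfigMaps.
    portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
    if err != nil {
        log.Fatal(err)
    }

    // Create or load the two ConfigMaps backing the ranges once the manager starts.
    runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-50999", "51000-51999")
    if err := mgr.Add(runnable); err != nil {
        log.Fatal(err)
    }

    if err := mgr.Start(ctx); err != nil {
        log.Fatal(err)
    }
}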

View File

@@ -8,24 +8,25 @@ import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/intstr"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
const (
sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml"
SharedNodeAgentName = "kubelet"
SharedNodeMode = "shared"
SharedNodeAgentName = "kubelet"
SharedNodeMode = "shared"
)
type SharedAgent struct {
@@ -34,15 +35,19 @@ type SharedAgent struct {
image string
imagePullPolicy string
token string
kubeletPort int
webhookPort int
}
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string) *SharedAgent {
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int) *SharedAgent {
return &SharedAgent{
Config: config,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
kubeletPort: kubeletPort,
webhookPort: webhookPort,
}
}
@@ -72,7 +77,7 @@ func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Ob
}
func (s *SharedAgent) config(ctx context.Context) error {
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP)
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort, s.webhookPort)
configSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -91,7 +96,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) string {
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
@@ -101,9 +106,12 @@ func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) s
clusterNamespace: %s
serverIP: %s
serviceName: %s
token: %s
version: %s`,
cluster.Name, cluster.Namespace, ip, serviceName, token, version)
token: %v
mirrorHostNodes: %t
version: %s
webhookPort: %d
kubeletPort: %d`,
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, webhookPort, kubeletPort)
}
func (s *SharedAgent) daemonset(ctx context.Context) error {
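The config rendered by sharedAgentData now carries mirrorHostNodes plus the kubelet and webhook ports. Below is a sketch (not part of this changeset) of decoding that shape on the consumer side; the struct name and example values are assumptions, and only the keys visible in this hunk are modeled.

package main

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

type sharedAgentConfig struct {
    ClusterNamespace string `yaml:"clusterNamespace"`
    ServerIP         string `yaml:"serverIP"`
    ServiceName      string `yaml:"serviceName"`
    Token            string `yaml:"token"`
    MirrorHostNodes  bool   `yaml:"mirrorHostNodes"`
    Version          string `yaml:"version"`
    WebhookPort      int    `yaml:"webhookPort"`
    KubeletPort      int    `yaml:"kubeletPort"`
}

func main() {
    // Example payload shaped like the output of sharedAgentData; values are invented.
    raw := `clusterNamespace: default
serverIP: 10.0.0.21
serviceName: service-name
token: example-token
mirrorHostNodes: false
version: v1.30.0+k3s1
webhookPort: 9443
kubeletPort: 10250`

    var cfg sharedAgentConfig
    if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
        panic(err)
    }

    fmt.Printf("kubelet=%d webhook=%d mirrorHostNodes=%t\n", cfg.KubeletPort, cfg.WebhookPort, cfg.MirrorHostNodes)
}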
@@ -140,7 +148,17 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
}
func (s *SharedAgent) podSpec() v1.PodSpec {
hostNetwork := false
dnsPolicy := v1.DNSClusterFirst
if s.cluster.Spec.MirrorHostNodes {
hostNetwork = true
dnsPolicy = v1.DNSClusterFirstWithHostNet
}
return v1.PodSpec{
HostNetwork: hostNetwork,
DNSPolicy: dnsPolicy,
ServiceAccountName: s.Name(),
NodeSelector: s.cluster.Spec.NodeSelector,
Volumes: []v1.Volume{
@@ -189,11 +207,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{},
},
Args: []string{
"--config",
sharedKubeletConfigPath,
},
Env: []v1.EnvVar{
Env: append([]v1.EnvVar{
{
Name: "AGENT_HOSTNAME",
ValueFrom: &v1.EnvVarSource{
@@ -203,7 +217,16 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
},
},
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
}, s.cluster.Spec.AgentEnvs...),
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -217,10 +240,15 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
},
},
Ports: []v1.ContainerPort{
{
Name: "kubelet-port",
Protocol: v1.ProtocolTCP,
ContainerPort: int32(s.kubeletPort),
},
{
Name: "webhook-port",
Protocol: v1.ProtocolTCP,
ContainerPort: 9443,
ContainerPort: int32(s.webhookPort),
},
},
},
@@ -249,13 +277,13 @@ func (s *SharedAgent) service(ctx context.Context) error {
{
Name: "k3s-kubelet-port",
Protocol: v1.ProtocolTCP,
Port: 10250,
Port: int32(s.kubeletPort),
},
{
Name: "webhook-server",
Protocol: v1.ProtocolTCP,
Port: 9443,
TargetPort: intstr.FromInt32(9443),
Port: int32(s.webhookPort),
TargetPort: intstr.FromInt32(int32(s.webhookPort)),
},
},
},
@@ -336,7 +364,7 @@ func (s *SharedAgent) role(ctx context.Context) error {
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"},
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{

View File

@@ -3,10 +3,12 @@ package agent
import (
"testing"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)
func Test_sharedAgentData(t *testing.T) {
@@ -14,6 +16,8 @@ func Test_sharedAgentData(t *testing.T) {
cluster *v1alpha1.Cluster
serviceName string
ip string
kubeletPort int
webhookPort int
token string
}
@@ -34,6 +38,8 @@ func Test_sharedAgentData(t *testing.T) {
Version: "v1.2.3",
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -45,6 +51,9 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
@@ -63,6 +72,8 @@ func Test_sharedAgentData(t *testing.T) {
},
},
ip: "10.0.0.21",
kubeletPort: 10250,
webhookPort: 9443,
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
},
@@ -73,6 +84,9 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
@@ -87,6 +101,8 @@ func Test_sharedAgentData(t *testing.T) {
HostVersion: "v1.3.3",
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -98,13 +114,16 @@ func Test_sharedAgentData(t *testing.T) {
"serviceName": "service-name",
"token": "dnjklsdjnksd892389238",
"version": "v1.3.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip)
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort)
data := make(map[string]string)
err := yaml.Unmarshal([]byte(config), data)

View File

@@ -5,12 +5,14 @@ import (
"errors"
"fmt"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/utils/ptr"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
const (
@@ -20,15 +22,19 @@ const (
type VirtualAgent struct {
*Config
serviceIP string
token string
serviceIP string
token string
k3SImage string
k3SImagePullPolicy string
}
func NewVirtualAgent(config *Config, serviceIP, token string) *VirtualAgent {
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
return &VirtualAgent{
Config: config,
serviceIP: serviceIP,
token: token,
Config: config,
serviceIP: serviceIP,
token: token,
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
}
}
@@ -72,13 +78,13 @@ func (v *VirtualAgent) config(ctx context.Context) error {
}
func virtualAgentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
return fmt.Sprintf(`server: https://%s
token: %s
with-node-id: true`, serviceIP, token)
}
func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster)
image := controller.K3SImage(v.cluster, v.k3SImage)
const name = "k3k-agent"
@@ -175,8 +181,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},
@@ -187,6 +194,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
Resources: v1.ResourceRequirements{
Limits: limit,
},
Env: v.cluster.Spec.AgentEnvs,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -228,5 +236,12 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
}
// override the container resource limits when a WorkerLimit is set for the agents.
if v.cluster.Spec.WorkerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: v.cluster.Spec.WorkerLimit,
}
}
return podSpec
}

View File

@@ -25,7 +25,7 @@ func Test_virtualAgentData(t *testing.T) {
token: "dnjklsdjnksd892389238",
},
expectedData: map[string]string{
"server": "https://10.0.0.21:6443",
"server": "https://10.0.0.21",
"token": "dnjklsdjnksd892389238",
"with-node-id": "true",
},

View File

@@ -5,32 +5,41 @@ import (
"errors"
"fmt"
"net"
"reflect"
"slices"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimecontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/policy"
)
const (
@@ -40,26 +49,32 @@ const (
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
maxConcurrentReconciles = 1
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
memberRemovalTimeout = time.Minute * 1
)
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
defaultStoragePersistentSize = "1G"
memberRemovalTimeout = time.Minute * 1
var (
ErrClusterValidation = errors.New("cluster validation error")
ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
)
type ClusterReconciler struct {
DiscoveryClient *discovery.DiscoveryClient
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
DiscoveryClient *discovery.DiscoveryClient
Client client.Client
Scheme *runtime.Scheme
record.EventRecorder
SharedAgentImage string
SharedAgentImagePullPolicy string
K3SImage string
K3SImagePullPolicy string
PortAllocator *agent.PortAllocator
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string) error {
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage string, k3SImagePullPolicy string, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
@@ -69,24 +84,67 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
return errors.New("missing shared agent image")
}
if eventRecorder == nil {
eventRecorder = mgr.GetEventRecorderFor(clusterController)
}
// initialize a new Reconciler
reconciler := ClusterReconciler{
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
EventRecorder: eventRecorder,
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
K3SImage: k3SImage,
K3SImagePullPolicy: k3SImagePullPolicy,
PortAllocator: portAllocator,
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
WithOptions(ctrlruntimecontroller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
return handler.Funcs{
// We don't need to update for create or delete events
CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) {},
DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) {},
// When a Namespace is updated and its "policy.k3k.io/policy-name" label changes, re-enqueue the Clusters in it
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldNs, okOld := e.ObjectOld.(*v1.Namespace)
newNs, okNew := e.ObjectNew.(*v1.Namespace)
if !okOld || !okNew {
return
}
oldVCPName := oldNs.Labels[policy.PolicyNameLabelKey]
newVCPName := newNs.Labels[policy.PolicyNameLabelKey]
// If policy hasn't changed we can skip the reconciliation
if oldVCPName == newVCPName {
return
}
// Enqueue all the Clusters in the namespace
var clusterList v1alpha1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
for _, cluster := range clusterList.Items {
q.Add(reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&cluster)})
}
},
}
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
@@ -95,27 +153,45 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, err
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// if DeletionTimestamp is not Zero -> finalize the object
if !cluster.DeletionTimestamp.IsZero() {
return c.finalizeCluster(ctx, cluster)
return c.finalizeCluster(ctx, &cluster)
}
// add finalizers
if !controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonProvisioning,
Message: "Cluster is being provisioned",
})
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{Requeue: true}, nil
}
// add finalizer
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{Requeue: true}, nil
}
orig := cluster.DeepCopy()
reconcilerErr := c.reconcileCluster(ctx, &cluster)
// update Status if needed
if !reflect.DeepEqual(orig.Status, cluster.Status) {
if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -123,11 +199,16 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}
return reconcile.Result{}, reconcilerErr
}
// update Cluster if needed
if !reflect.DeepEqual(orig.Spec, cluster.Spec) {
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -137,8 +218,34 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(cluster, err)
return err
}
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
var ns v1.Namespace
if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
return err
}
policyName, found := ns.Labels[policy.PolicyNameLabelKey]
cluster.Status.PolicyName = policyName
if found && policyName != "" {
var policy v1alpha1.VirtualClusterPolicy
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
return err
}
if err := c.validate(cluster, policy); err != nil {
return err
}
}
// if the Version is not specified we will try to use the same Kubernetes version of the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
@@ -154,24 +261,12 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
cluster.Status.HostVersion = k8sVersion + "-k3s1"
}
// TODO: update status?
if err := c.validate(cluster); err != nil {
log.Error(err, "invalid change")
return nil
}
token, err := c.token(ctx, cluster)
if err != nil {
return err
}
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode))
cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
}
s := server.New(cluster, c.Client, token, c.K3SImage, c.K3SImagePullPolicy)
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
@@ -188,7 +283,6 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
log.Info("looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {
log.Error(err, "error while looking up Cluster Service CIDR")
@@ -235,7 +329,11 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}
return c.bindNodeProxyClusterRole(ctx, cluster)
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
return err
}
return c.bindClusterRoles(ctx, cluster)
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
@@ -270,6 +368,45 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
return err
}
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
adminKubeconfig := kubeconfig.New()
kubeconfig, err := adminKubeconfig.Generate(ctx, c.Client, cluster, serviceIP, port)
if err != nil {
return err
}
kubeconfigData, err := clientcmd.Write(*kubeconfig)
if err != nil {
return err
}
kubeconfigSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "kubeconfig"),
Namespace: cluster.Namespace,
},
}
_, err = controllerutil.CreateOrUpdate(ctx, c.Client, kubeconfigSecret, func() error {
if err := controllerutil.SetControllerReference(cluster, kubeconfigSecret, c.Scheme); err != nil {
return err
}
kubeconfigSecret.Data = map[string][]byte{
"kubeconfig.yaml": kubeconfigData,
}
return nil
})
return err
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
@@ -310,9 +447,23 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)
// network policies are managed by the Policy -> delete the one created as a standalone cluster
if cluster.Status.PolicyName != "" {
netpol := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: networkPolicyName,
Namespace: cluster.Namespace,
},
}
return client.IgnoreNotFound(c.Client.Delete(ctx, netpol))
}
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Name: controller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
TypeMeta: metav1.TypeMeta{
@@ -362,6 +513,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
}
currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
return err
@@ -371,7 +523,6 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
return nil
})
if err != nil {
return err
}
@@ -389,8 +540,8 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
log.Info("ensuring cluster service")
expectedService := server.Service(cluster)
currentService := expectedService.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentService, func() error {
if err := controllerutil.SetControllerReference(cluster, currentService, c.Scheme); err != nil {
return err
@@ -400,7 +551,6 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
return nil
})
if err != nil {
return nil, err
}
@@ -426,6 +576,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
}
currentServerIngress := expectedServerIngress.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerIngress, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerIngress, c.Scheme); err != nil {
return err
@@ -436,7 +587,6 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
return nil
})
if err != nil {
return err
}
@@ -488,31 +638,34 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
}
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
var err error
found := false
for _, clusterRole := range clusterRoles {
var clusterRoleBinding rbacv1.ClusterRoleBinding
if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil {
err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr))
continue
}
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
found = true
clusterSubject := rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
Name: controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName),
Namespace: cluster.Namespace,
}
if !slices.Contains(clusterRoleBinding.Subjects, clusterSubject) {
clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, clusterSubject)
if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil {
err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr))
}
}
}
if !found {
clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{
Kind: "ServiceAccount",
Name: subjectName,
Namespace: cluster.Namespace,
})
}
return c.Client.Update(ctx, clusterRoleBinding)
return err
}
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
@@ -520,17 +673,49 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
var agentEnsurer agent.ResourceEnsurer
if cluster.Spec.Mode == agent.VirtualNodeMode {
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token)
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
} else {
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
// Assign ports from the pool if the shared agent has host-node mirroring enabled
kubeletPort := 10250
webhookPort := 9443
if cluster.Spec.MirrorHostNodes {
var err error
kubeletPort, err = c.PortAllocator.AllocateKubeletPort(ctx, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
cluster.Status.KubeletPort = kubeletPort
webhookPort, err = c.PortAllocator.AllocateWebhookPort(ctx, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
cluster.Status.WebhookPort = webhookPort
}
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort)
}
return agentEnsurer.EnsureResources(ctx)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
if cluster.Name == ClusterInvalidName {
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
}
if cluster.Spec.Mode != policy.Spec.AllowedMode {
return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
}
if cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster); err != nil {
return fmt.Errorf("%w: %w", ErrClusterValidation, err)
}
}
return nil
@@ -573,11 +758,11 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
log.Info("looking up serviceCIDR from kube-apiserver pod")
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{
matchingLabels := client.MatchingLabels(map[string]string{
"component": "kube-apiserver",
"tier": "control-plane",
})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: "kube-system"}
listOpts := &client.ListOptions{Namespace: "kube-system"}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
@@ -612,3 +797,18 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
return "", nil
}
// validateCustomCACerts will make sure that all the cert secrets exist
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
credentialSources := cluster.Spec.CustomCAs.Sources
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||
credentialSources.ETCDPeerCA.SecretName == "" ||
credentialSources.ETCDServerCA.SecretName == "" ||
credentialSources.RequestHeaderCA.SecretName == "" ||
credentialSources.ServiceAccountToken.SecretName == "" {
return ErrCustomCACertSecretMissing
}
return nil
}
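The new namespace watch above re-queues Clusters whenever the policy label on their Namespace changes, and reconcile then validates each Cluster against the referenced VirtualClusterPolicy. A sketch (not part of this changeset) of binding a Namespace to a policy by label; the namespace and policy names are examples, and the label key is taken from the comment in the event handler.

package main

import (
    "context"
    "log"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
    ctx := context.Background()

    c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
    if err != nil {
        log.Fatal(err)
    }

    ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "tenant-a"}}
    if err := c.Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil {
        log.Fatal(err)
    }

    if ns.Labels == nil {
        ns.Labels = map[string]string{}
    }

    // Binding the namespace to a policy triggers the UpdateFunc above,
    // which re-queues every Cluster in this namespace.
    ns.Labels["policy.k3k.io/policy-name"] = "example-policy"

    if err := c.Update(ctx, ns); err != nil {
        log.Fatal(err)
    }
}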

View File

@@ -2,54 +2,58 @@ package cluster
import (
"context"
"errors"
"fmt"
"reflect"
"slices"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha1.Cluster) (reconcile.Result, error) {
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
// remove finalizer from the server pods and update them.
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: cluster.Namespace}
matchingLabels.ApplyToList(listOpts)
// Set the Terminating phase and condition
cluster.Status.Phase = v1alpha1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonTerminating,
Message: "Cluster is being terminated",
})
var podList v1.PodList
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}
}
}
if err := c.unbindNodeProxyClusterRole(ctx, &cluster); err != nil {
if err := c.unbindClusterRoles(ctx, cluster); err != nil {
return reconcile.Result{}, err
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// remove finalizer from the cluster and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
if err := c.Client.Update(ctx, &cluster); err != nil {
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
return reconcile.Result{}, err
}
if err := c.PortAllocator.DeallocateWebhookPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.WebhookPort); err != nil {
return reconcile.Result{}, err
}
}
// Remove finalizer from the cluster and update it only when all resources are cleaned up
if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
if err := c.Client.Update(ctx, cluster); err != nil {
return reconcile.Result{}, err
}
}
@@ -57,28 +61,37 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
}
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
var err error
var cleanedSubjects []rbacv1.Subject
for _, clusterRole := range clusterRoles {
var clusterRoleBinding rbacv1.ClusterRoleBinding
if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil {
err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr))
continue
}
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name != subjectName || subject.Namespace != cluster.Namespace {
cleanedSubjects = append(cleanedSubjects, subject)
clusterSubject := rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
Name: controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName),
Namespace: cluster.Namespace,
}
// remove the clusterSubject from the ClusterRoleBinding
cleanedSubjects := slices.DeleteFunc(clusterRoleBinding.Subjects, func(subject rbacv1.Subject) bool {
return reflect.DeepEqual(subject, clusterSubject)
})
if !reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) {
clusterRoleBinding.Subjects = cleanedSubjects
if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil {
err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr))
}
}
}
// if no subject was removed, all good
if reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) {
return nil
}
clusterRoleBinding.Subjects = cleanedSubjects
return c.Client.Update(ctx, clusterRoleBinding)
return err
}
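The rewritten unbindClusterRoles removes the cluster's subject with slices.DeleteFunc and only issues an Update when the subject list actually changed. The standalone sketch below shows the same idiom on a plain struct; slices.DeleteFunc edits the slice in place, so cloning first keeps the original value available for the before/after comparison (here a simple length check, since DeleteFunc never adds elements).

package main

import (
	"fmt"
	"slices"
)

type subject struct{ Kind, Name, Namespace string }

func main() {
	subjects := []subject{
		{Kind: "ServiceAccount", Name: "cluster-a-kubelet", Namespace: "ns-a"},
		{Kind: "ServiceAccount", Name: "cluster-b-kubelet", Namespace: "ns-b"},
	}
	target := subject{Kind: "ServiceAccount", Name: "cluster-a-kubelet", Namespace: "ns-a"}

	// Clone before deleting so the original slice stays intact for the comparison.
	cleaned := slices.DeleteFunc(slices.Clone(subjects), func(s subject) bool {
		return s == target
	})

	if len(cleaned) != len(subjects) {
		fmt.Println("a subject was removed; the ClusterRoleBinding would be updated")
	}
}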

View File

@@ -2,24 +2,25 @@ package cluster_test
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -38,12 +39,15 @@ var (
)
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
}
// setting controller namespace env to activate port range allocator
_ = os.Setenv("CONTROLLER_NAMESPACE", "default")
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
@@ -59,8 +63,15 @@ var _ = BeforeSuite(func() {
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
Expect(err).NotTo(HaveOccurred())
err = mgr.Add(portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-51000", "51001-52000"))
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "")
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "", 50, portAllocator, &record.FakeRecorder{})
Expect(err).NotTo(HaveOccurred())
go func() {
@@ -81,13 +92,7 @@ var _ = AfterSuite(func() {
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = rbacv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = appsv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
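The suite setup above now builds its scheme with a single clientgoscheme.AddToScheme call, which registers every built-in API group (core, apps, rbac, networking, and so on) at once. A minimal sketch of that pattern; CRD schemes are registered the same way afterwards.

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

// newScheme registers all built-in Kubernetes types in one call.
func newScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(s); err != nil {
		return nil, err
	}
	// CRD schemes (for example v1alpha1.AddToScheme) would be added here as well.
	return s, nil
}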

View File

@@ -5,30 +5,31 @@ import (
"fmt"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), func() {
Context("creating a Cluster", func() {
var (
namespace string
ctx context.Context
)
BeforeEach(func() {
ctx = context.Background()
createdNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
@@ -36,11 +37,8 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
When("creating a Cluster", func() {
var cluster *v1alpha1.Cluster
BeforeEach(func() {
cluster = &v1alpha1.Cluster{
It("will be created with some defaults", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
@@ -49,17 +47,18 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
})
It("will be created with some defaults", func() {
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
// TOFIX
// Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
serverVersion, err := k8s.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
expectedHostVersion := fmt.Sprintf("%s-k3s1", serverVersion.GitVersion)
@@ -67,7 +66,6 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Status.HostVersion
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
@@ -92,22 +90,25 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(spec.Ingress).To(Equal([]networkingv1.NetworkPolicyIngressRule{{}}))
})
When("exposing the cluster with nodePort and custom ports", func() {
It("will have a NodePort service with the specified port exposed", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ServicePort: ptr.To[int32](30011),
ETCDPort: ptr.To[int32](30012),
When("exposing the cluster with nodePort", func() {
It("will have a NodePort service", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
},
}
err := k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
var service v1.Service
var service corev1.Service
Eventually(func() v1.ServiceType {
Eventually(func() corev1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
@@ -119,33 +120,147 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(v1.ServiceTypeNodePort))
Should(Equal(corev1.ServiceTypeNodePort))
})
It("will have the specified ports exposed when specified", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ETCDPort: ptr.To[int32](30011),
},
},
},
}
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
var service corev1.Service
Eventually(func() corev1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}
err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(corev1.ServiceTypeNodePort))
servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(3))
Expect(servicePorts).To(HaveLen(2))
Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-server-port"),
HaveField("Port", BeEquivalentTo(6443)),
HaveField("NodePort", BeEquivalentTo(30010)),
),
))
Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-service-port"),
HaveField("Port", BeEquivalentTo(443)),
HaveField("NodePort", BeEquivalentTo(30011)),
),
))
Expect(servicePorts).To(ContainElement(
And(
HaveField("Name", "k3s-etcd-port"),
HaveField("Port", BeEquivalentTo(2379)),
HaveField("NodePort", BeEquivalentTo(30012)),
),
))
serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.NodePort).To(BeEquivalentTo(30010))
etcdPort := servicePorts[1]
Expect(etcdPort.Name).To(Equal("k3s-etcd-port"))
Expect(etcdPort.Port).To(BeEquivalentTo(2379))
Expect(etcdPort.NodePort).To(BeEquivalentTo(30011))
})
It("will not expose the port when out of range", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
ETCDPort: ptr.To[int32](2222),
},
},
},
}
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
var service corev1.Service
Eventually(func() corev1.ServiceType {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}
err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(corev1.ServiceTypeNodePort))
servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(1))
serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.TargetPort.IntValue()).To(BeEquivalentTo(6443))
})
})
When("exposing the cluster with loadbalancer", func() {
It("will have a LoadBalancer service with the default ports exposed", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
},
},
}
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
var service corev1.Service
Eventually(func() error {
serviceKey := client.ObjectKey{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}
return k8sClient.Get(ctx, serviceKey, &service)
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer))
servicePorts := service.Spec.Ports
Expect(servicePorts).NotTo(BeEmpty())
Expect(servicePorts).To(HaveLen(2))
serverPort := servicePorts[0]
Expect(serverPort.Name).To(Equal("k3s-server-port"))
Expect(serverPort.Port).To(BeEquivalentTo(443))
Expect(serverPort.TargetPort.IntValue()).To(BeEquivalentTo(6443))
etcdPort := servicePorts[1]
Expect(etcdPort.Name).To(Equal("k3s-etcd-port"))
Expect(etcdPort.Port).To(BeEquivalentTo(2379))
Expect(etcdPort.TargetPort.IntValue()).To(BeEquivalentTo(2379))
})
})
})
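The service tests above poll with Gomega's Eventually, chaining WithTimeout and WithPolling until the controller has reconciled the Service. The same chain works outside Ginkgo from a plain Go test; the sketch below polls a trivial stand-in condition instead of a k8sClient.Get call.

package sketch

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyPolling(t *testing.T) {
	g := gomega.NewWithT(t)
	start := time.Now()

	// Poll every 50ms for up to 5s, mirroring the WithTimeout/WithPolling chain above.
	g.Eventually(func() bool {
		return time.Since(start) > 100*time.Millisecond
	}).
		WithTimeout(5 * time.Second).
		WithPolling(50 * time.Millisecond).
		Should(gomega.BeTrue())
}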

View File

@@ -9,27 +9,29 @@ import (
"net/url"
"strings"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
const (
@@ -42,7 +44,7 @@ type PodReconciler struct {
}
// Add adds a new controller to the manager
func AddPodController(ctx context.Context, mgr manager.Manager) error {
func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := PodReconciler{
Client: mgr.GetClient(),
@@ -52,9 +54,7 @@ func AddPodController(ctx context.Context, mgr manager.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
Named(podController).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
@@ -152,17 +152,14 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
}
// remove our finalizer from the list and update it.
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
return p.Client.Update(ctx, pod)
}
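The finalizer handling above was simplified because controllerutil.AddFinalizer and RemoveFinalizer report whether they changed the object, so the Update call is skipped for no-ops. A small sketch of that behaviour, assuming a controller-runtime version with the bool-returning helpers (which the diff already relies on); the finalizer string below is a placeholder, not the real etcdPodFinalizerName value.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	pod := &corev1.Pod{}
	const finalizer = "example.io/etcd-pod" // placeholder finalizer name

	fmt.Println(controllerutil.AddFinalizer(pod, finalizer))    // true: object changed, Update needed
	fmt.Println(controllerutil.AddFinalizer(pod, finalizer))    // false: already present, skip Update
	fmt.Println(controllerutil.RemoveFinalizer(pod, finalizer)) // true: object changed, Update needed
	fmt.Println(controllerutil.RemoveFinalizer(pod, finalizer)) // false: nothing to remove
}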
@@ -242,8 +239,8 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}

View File

@@ -8,15 +8,20 @@ import (
"errors"
"fmt"
"net/http"
"syscall"
"time"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var ErrServerNotReady = errors.New("server not ready")
type ControlRuntimeBootstrap struct {
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"serverCAKey"`
@@ -48,7 +53,7 @@ func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, t
}
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
url := "https://" + serverIP + "/v1-k3s/server-bootstrap"
client := http.Client{
Transport: &http.Transport{
@@ -68,9 +73,16 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
resp, err := client.Do(req)
if err != nil {
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, ErrServerNotReady
}
return nil, err
}
defer resp.Body.Close()
defer func() {
_ = resp.Body.Close()
}()
var runtimeBootstrap ControlRuntimeBootstrap
if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
@@ -86,7 +98,7 @@ func basicAuth(username, password string) string {
}
func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
//client-ca
// client-ca
decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content)
if err != nil {
return err
@@ -94,7 +106,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
bootstrap.ClientCA.Content = string(decoded)
//client-ca-key
// client-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content)
if err != nil {
return err
@@ -102,7 +114,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
bootstrap.ClientCAKey.Content = string(decoded)
//server-ca
// server-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content)
if err != nil {
return err
@@ -110,7 +122,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
bootstrap.ServerCA.Content = string(decoded)
//server-ca-key
// server-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content)
if err != nil {
return err
@@ -118,7 +130,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
bootstrap.ServerCAKey.Content = string(decoded)
//etcd-ca
// etcd-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCA.Content)
if err != nil {
return err
@@ -126,7 +138,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
bootstrap.ETCDServerCA.Content = string(decoded)
//etcd-ca-key
// etcd-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCAKey.Content)
if err != nil {
return err
@@ -167,6 +179,7 @@ func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.
}
var bootstrap ControlRuntimeBootstrap
err := json.Unmarshal(bootstrapData, &bootstrap)
return &bootstrap, err
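requestBootstrap above maps a refused connection to the ErrServerNotReady sentinel so callers can treat startup lag as "still provisioning" rather than a hard failure. A standalone sketch of that mapping with errors.Is; the URL and the sentinel name here are placeholders.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"syscall"
)

var errServerNotReady = errors.New("server not ready")

// fetch translates "connection refused" into a retryable sentinel error.
func fetch(url string) (*http.Response, error) {
	resp, err := http.Get(url)
	if err != nil {
		if errors.Is(err, syscall.ECONNREFUSED) {
			return nil, errServerNotReady
		}
		return nil, err
	}
	return resp, nil
}

func main() {
	// Nothing listens on this port, so the call reports "not ready" rather than failing hard.
	if _, err := fetch("http://127.0.0.1:1"); errors.Is(err, errServerNotReady) {
		fmt.Println("server not ready yet; requeue and retry")
	}
}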

View File

@@ -3,21 +3,28 @@ package server
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
name := configSecretName(s.cluster.Name, init)
s.cluster.Status.TLSSANs = append(s.cluster.Spec.TLSSANs,
sans := sets.NewString(s.cluster.Spec.TLSSANs...)
sans.Insert(
serviceIP,
ServiceName(s.cluster.Name),
fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace),
)
s.cluster.Status.TLSSANs = sans.List()
config := serverConfigData(serviceIP, s.cluster, s.token)
if init {
config = initConfigData(s.cluster, s.token)
@@ -39,7 +46,7 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster, token)
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
}
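The Config change above collects TLS SANs in a string set, so user-supplied entries and the generated service names are deduplicated and the resulting list stays stable across reconciles. A minimal sketch of the sets helper used there; the SAN values are placeholders.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// User-supplied SANs may already include the generated service name.
	sans := sets.NewString("my-cluster.example.com", "k3k-mycluster-service")
	sans.Insert("10.43.0.10", "k3k-mycluster-service", "k3k-mycluster-service.default")

	// List returns a sorted slice, which keeps the rendered config deterministic.
	fmt.Println(sans.List())
}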
func initConfigData(cluster *v1alpha1.Cluster, token string) string {

View File

@@ -3,17 +3,19 @@ package server
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/utils/ptr"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
)
const (
servicePort = 443
serverPort = 6443
etcdPort = 2379
httpsPort = 443
k3sServerPort = 6443
etcdPort = 2379
)
func IngressName(clusterName string) string {
@@ -64,7 +66,7 @@ func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
Service: &networkingv1.IngressServiceBackend{
Name: ServiceName(cluster.Name),
Port: networkingv1.ServiceBackendPort{
Number: serverPort,
Number: httpsPort,
},
},
},

View File

@@ -3,20 +3,24 @@ package server
import (
"bytes"
"context"
"fmt"
"sort"
"strings"
"text/template"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
const (
@@ -24,33 +28,30 @@ const (
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
ServerPort = 6443
)
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
k3SImage string
k3SImagePullPolicy string
}
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *Server {
func New(cluster *v1alpha1.Cluster, client client.Client, token string, k3SImage string, k3SImagePullPolicy string) *Server {
return &Server{
cluster: cluster,
client: client,
token: token,
mode: mode,
cluster: cluster,
client: client,
token: token,
mode: string(cluster.Spec.Mode),
k3SImage: k3SImage,
k3SImagePullPolicy: k3SImagePullPolicy,
}
}
func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
}
podSpec := v1.PodSpec{
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
@@ -116,11 +117,9 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Resources: v1.ResourceRequirements{
Limits: limit,
},
Name: name,
Image: image,
ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -213,6 +212,20 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
},
},
}
podSpec.Containers[0].LivenessProbe = &v1.Probe{
InitialDelaySeconds: 10,
FailureThreshold: 3,
PeriodSeconds: 3,
ProbeHandler: v1.ProbeHandler{
Exec: &v1.ExecAction{
Command: []string{
"sh",
"-c",
`grep -q "rejoin the cluster" /var/log/k3s.log && exit 1 || exit 0`,
},
},
},
}
// start the pod unprivileged in shared mode
if s.mode == agent.VirtualNodeMode {
podSpec.Containers[0].SecurityContext = &v1.SecurityContext{
@@ -220,6 +233,15 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
}
}
// specify resource limits if specified for the servers.
if s.cluster.Spec.ServerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: s.cluster.Spec.ServerLimit,
}
}
podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, s.cluster.Spec.ServerEnvs...)
return podSpec
}
@@ -231,7 +253,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
persistent bool
)
image := controller.K3SImage(s.cluster)
image := controller.K3SImage(s.cluster, s.k3SImage)
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
replicas = *s.cluster.Spec.Servers
@@ -301,6 +323,17 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
volumeMounts = append(volumeMounts, volumeMount)
}
if s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.loadCACertBundle(ctx)
if err != nil {
return nil, err
}
volumes = append(volumes, vols...)
volumeMounts = append(volumeMounts, mounts...)
}
selector := metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": s.cluster.Name,
@@ -361,7 +394,7 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
StorageClassName: s.cluster.Spec.Persistence.StorageClassName,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(s.cluster.Status.Persistence.StorageRequestSize),
"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
},
},
},
@@ -392,3 +425,103 @@ func (s *Server) setupStartCommand() (string, error) {
return output.String(), nil
}
func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
customCerts := s.cluster.Spec.CustomCAs.Sources
caCertMap := map[string]string{
"server-ca": customCerts.ServerCA.SecretName,
"client-ca": customCerts.ClientCA.SecretName,
"request-header-ca": customCerts.RequestHeaderCA.SecretName,
"etcd-peer-ca": customCerts.ETCDPeerCA.SecretName,
"etcd-server-ca": customCerts.ETCDServerCA.SecretName,
"service": customCerts.ServiceAccountToken.SecretName,
}
var (
volumes []v1.Volume
mounts []v1.VolumeMount
sortedCertIDs = sortedKeys(caCertMap)
)
for _, certName := range sortedCertIDs {
var certSecret v1.Secret
secretName := string(caCertMap[certName])
key := types.NamespacedName{Name: secretName, Namespace: s.cluster.Namespace}
if err := s.client.Get(ctx, key, &certSecret); err != nil {
return nil, nil, err
}
cert := certSecret.Data["tls.crt"]
keyData := certSecret.Data["tls.key"]
// Service account token secret is an exception (may not contain crt/key).
if certName != "service" && (len(cert) == 0 || len(keyData) == 0) {
return nil, nil, fmt.Errorf("cert or key not found in secret %s", secretName)
}
volumeName := certName + "-vol"
vol, certMounts := s.mountCACert(volumeName, certName, secretName, "tls")
volumes = append(volumes, *vol)
mounts = append(mounts, certMounts...)
}
return volumes, mounts, nil
}
func (s *Server) mountCACert(volumeName, certName, secretName string, subPathMount string) (*v1.Volume, []v1.VolumeMount) {
var (
volume *v1.Volume
mounts []v1.VolumeMount
)
// avoid re-adding secretName in case of combined secret
volume = &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: secretName},
},
}
etcdPrefix := ""
mountFile := certName
if strings.HasPrefix(certName, "etcd-") {
etcdPrefix = "/etcd"
mountFile = strings.TrimPrefix(certName, "etcd-")
}
// add the mount for the cert except for the service account token
if certName != "service" {
mounts = append(mounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/var/lib/rancher/k3s/server/tls%s/%s.crt", etcdPrefix, mountFile),
SubPath: subPathMount + ".crt",
})
}
// add the mount for the key
mounts = append(mounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/var/lib/rancher/k3s/server/tls%s/%s.key", etcdPrefix, mountFile),
SubPath: subPathMount + ".key",
})
return volume, mounts
}
func sortedKeys(keyMap map[string]string) []string {
keys := make([]string, 0, len(keyMap))
for k := range keyMap {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
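sortedKeys exists so the CA volumes and mounts are emitted in a stable order; ranging over the map directly would yield a different order on each reconcile, which could churn the generated StatefulSet spec. On Go 1.23 or newer the same thing can be expressed with the maps and slices iterator helpers; a small sketch with placeholder secret names:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	caCertMap := map[string]string{
		"server-ca":      "my-server-ca-secret",
		"client-ca":      "my-client-ca-secret",
		"etcd-server-ca": "my-etcd-server-ca-secret",
	}

	// slices.Sorted(maps.Keys(m)) yields the keys in a deterministic order.
	for _, name := range slices.Sorted(maps.Keys(caCertMap)) {
		fmt.Println(name, "->", caCertMap[name])
	}
}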

View File

@@ -1,11 +1,13 @@
package server
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/util/intstr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
@@ -19,7 +21,6 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
Namespace: cluster.Namespace,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
@@ -28,16 +29,10 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
}
k3sServerPort := v1.ServicePort{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: serverPort,
}
k3sServicePort := v1.ServicePort{
Name: "k3s-service-port",
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
Port: httpsPort,
TargetPort: intstr.FromInt(k3sServerPort),
}
etcdPort := v1.ServicePort{
@@ -46,35 +41,85 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
Port: etcdPort,
}
// If no expose is specified, default to ClusterIP
if cluster.Spec.Expose == nil {
service.Spec.Type = v1.ServiceTypeClusterIP
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
}
// If expose is specified, set the type to the appropriate type
if cluster.Spec.Expose != nil {
nodePortConfig := cluster.Spec.Expose.NodePort
if nodePortConfig != nil {
expose := cluster.Spec.Expose
switch {
case expose.LoadBalancer != nil:
service.Spec.Type = v1.ServiceTypeLoadBalancer
addLoadBalancerPorts(service, *expose.LoadBalancer, k3sServerPort, etcdPort)
case expose.NodePort != nil:
service.Spec.Type = v1.ServiceTypeNodePort
if nodePortConfig.ServerPort != nil {
k3sServerPort.NodePort = *nodePortConfig.ServerPort
}
if nodePortConfig.ServicePort != nil {
k3sServicePort.NodePort = *nodePortConfig.ServicePort
}
if nodePortConfig.ETCDPort != nil {
etcdPort.NodePort = *nodePortConfig.ETCDPort
}
addNodePortPorts(service, *expose.NodePort, k3sServerPort, etcdPort)
default:
// default to clusterIP for ingress or empty expose config
service.Spec.Type = v1.ServiceTypeClusterIP
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
}
}
service.Spec.Ports = append(
service.Spec.Ports,
k3sServicePort,
etcdPort,
k3sServerPort,
)
return service
}
// addLoadBalancerPorts adds the load balancer ports to the service
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, use the default port
if loadbalancerConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
} else if *loadbalancerConfig.ServerPort > 0 {
// If the server port is specified, set the port, otherwise the service will not be exposed
k3sServerPort.Port = *loadbalancerConfig.ServerPort
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
}
// If the etcd port is not specified, use the default port
if loadbalancerConfig.ETCDPort == nil {
service.Spec.Ports = append(service.Spec.Ports, etcdPort)
} else if *loadbalancerConfig.ETCDPort > 0 {
// If the etcd port is specified, set the port, otherwise the service will not be exposed
etcdPort.Port = *loadbalancerConfig.ETCDPort
service.Spec.Ports = append(service.Spec.Ports, etcdPort)
}
}
// addNodePortPorts adds the node port ports to the service
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, Kubernetes will assign a random NodePort between 30000 and 32767
if nodePortConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
} else {
serverNodePort := *nodePortConfig.ServerPort
// If the server port is in the range of 30000-32767, set the node port
// otherwise the service will not be exposed
if serverNodePort >= 30000 && serverNodePort <= 32767 {
k3sServerPort.NodePort = serverNodePort
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
}
}
// If the etcd port is not specified, Kubernetes will assign a random NodePort between 30000 and 32767
if nodePortConfig.ETCDPort == nil {
service.Spec.Ports = append(service.Spec.Ports, etcdPort)
} else {
etcdNodePort := *nodePortConfig.ETCDPort
// If the etcd port is in the range of 30000-32767, set the node port
// otherwise the service will not be exposed
if etcdNodePort >= 30000 && etcdNodePort <= 32767 {
etcdPort.NodePort = etcdNodePort
service.Spec.Ports = append(service.Spec.Ports, etcdPort)
}
}
}
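addNodePortPorts only honours an explicit NodePort inside Kubernetes' default 30000-32767 range; anything outside it is dropped, which is exactly what the "will not expose the port when out of range" test exercises. A standalone sketch of that guard; the helper name is hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// withNodePort applies the requested NodePort, or reports false when the request
// falls outside the default 30000-32767 range and the port should be skipped.
func withNodePort(port corev1.ServicePort, requested *int32) (corev1.ServicePort, bool) {
	if requested == nil {
		return port, true // let Kubernetes pick a random NodePort
	}
	if *requested < 30000 || *requested > 32767 {
		return port, false
	}
	port.NodePort = *requested
	return port, true
}

func main() {
	etcd := corev1.ServicePort{Name: "k3s-etcd-port", Port: 2379}
	out := int32(2222)

	if _, ok := withNodePort(etcd, &out); !ok {
		fmt.Println("etcd NodePort 2222 is out of range and will not be exposed")
	}
}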
func (s *Server) StatefulServerService() *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
@@ -94,15 +139,10 @@ func (s *Server) StatefulServerService() *v1.Service {
},
Ports: []v1.ServicePort{
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: serverPort,
},
{
Name: "k3s-service-port",
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
Port: httpsPort,
TargetPort: intstr.FromInt(k3sServerPort),
},
{
Name: "k3s-etcd-port",

View File

@@ -3,14 +3,14 @@ package server
var singleServerTemplate string = `
if [ -d "{{.ETCD_DIR}}" ]; then
# if the directory exists, it means it's not an initial run
/bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
/bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi
rm -f /var/lib/rancher/k3s/server/db/reset-flag
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}`
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log`
var HAServerTemplate string = `
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
else
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}}
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi`

View File

@@ -0,0 +1,97 @@
package cluster
import (
"errors"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
const (
// Condition Types
ConditionReady = "Ready"
// Condition Reasons
ReasonValidationFailed = "ValidationFailed"
ReasonProvisioning = "Provisioning"
ReasonProvisioned = "Provisioned"
ReasonProvisioningFailed = "ProvisioningFailed"
ReasonTerminating = "Terminating"
)
func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
if !cluster.DeletionTimestamp.IsZero() {
cluster.Status.Phase = v1alpha1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonTerminating,
Message: "Cluster is being terminated",
})
return
}
// Handle validation errors specifically to set the Pending phase.
if errors.Is(reconcileErr, ErrClusterValidation) {
cluster.Status.Phase = v1alpha1.ClusterPending
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonValidationFailed,
Message: reconcileErr.Error(),
})
c.Eventf(cluster, v1.EventTypeWarning, ReasonValidationFailed, reconcileErr.Error())
return
}
if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonProvisioning,
Message: reconcileErr.Error(),
})
return
}
// If there's an error, but it's not a validation error, the cluster is in a failed state.
if reconcileErr != nil {
cluster.Status.Phase = v1alpha1.ClusterFailed
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
Reason: ReasonProvisioningFailed,
Message: reconcileErr.Error(),
})
c.Eventf(cluster, v1.EventTypeWarning, ReasonProvisioningFailed, reconcileErr.Error())
return
}
// If we reach here, everything is successful.
cluster.Status.Phase = v1alpha1.ClusterReady
newCondition := metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionTrue,
Reason: ReasonProvisioned,
Message: "Cluster successfully provisioned",
}
// Only emit event on transition to Ready
if !meta.IsStatusConditionPresentAndEqual(cluster.Status.Conditions, ConditionReady, metav1.ConditionTrue) {
c.Eventf(cluster, v1.EventTypeNormal, ReasonProvisioned, newCondition.Message)
}
meta.SetStatusCondition(&cluster.Status.Conditions, newCondition)
}
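updateStatus leans on the apimachinery condition helpers: SetStatusCondition rewrites a condition in place, and IsStatusConditionPresentAndEqual gates the one-time Provisioned event so it only fires on the transition to Ready. A minimal sketch of those two helpers on a bare condition slice:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	ready := metav1.Condition{
		Type:    "Ready",
		Status:  metav1.ConditionTrue,
		Reason:  "Provisioned",
		Message: "Cluster successfully provisioned",
	}

	// First pass: Ready=True is not present yet, so the event fires once.
	if !meta.IsStatusConditionPresentAndEqual(conditions, "Ready", metav1.ConditionTrue) {
		fmt.Println("would emit Provisioned event")
	}
	meta.SetStatusCondition(&conditions, ready)

	// Second pass with the same condition: no transition, no event.
	if !meta.IsStatusConditionPresentAndEqual(conditions, "Ready", metav1.ConditionTrue) {
		fmt.Println("would emit Provisioned event")
	}
}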

Some files were not shown because too many files have changed in this diff.