Compare commits


105 Commits

Author SHA1 Message Date
enrichman
ee60651602 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2026-01-29 08:56:00 +00:00
enrichman
a96c67522c Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2026-01-15 13:14:48 +00:00
enrichman
2d2c750769 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-12-09 14:41:59 +00:00
enrichman
3a7d418576 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-12-03 14:02:05 +00:00
enrichman
a53827fa32 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-11-17 17:25:25 +00:00
enrichman
fc9710a83f Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-11-03 15:44:38 +00:00
enrichman
a5dddac72e Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-10-31 16:01:53 +00:00
enrichman
06b53d35a9 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-10-28 15:30:42 +00:00
enrichman
2ced39df3e Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-10-14 13:19:34 +00:00
galal-hussein
ee458cff4d Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-09-30 12:27:29 +00:00
galal-hussein
1a56483285 Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-09-17 09:16:58 +00:00
enrichman
d98735583d Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-08-28 08:57:50 +00:00
enrichman
14ba636ee0 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-08-25 17:03:08 +00:00
enrichman
851b19aade Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-08-19 08:57:52 +00:00
enrichman
8527b444c5 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-07-24 15:13:44 +00:00
enrichman
c75e07f1c0 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-06-30 08:45:00 +00:00
galal-hussein
f34ba97c72 Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-06-27 14:06:59 +00:00
enrichman
545b6d6daf Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-06-25 08:51:00 +00:00
galal-hussein
84791613bc Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-06-24 13:00:57 +00:00
enrichman
094af525b2 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-06-20 16:20:26 +00:00
enrichman
0d3122ed54 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-06-04 07:57:47 +00:00
enrichman
b15bfdc83c Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-04-18 10:45:23 +00:00
galal-hussein
cd5fe2d52e Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-03-21 01:30:36 +00:00
enrichman
1be8407c94 Update index.yaml
Signed-off-by: enrichman <enrichman@users.noreply.github.com>
2025-03-03 16:15:27 +00:00
galal-hussein
66ac302f4b Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-02-17 13:13:16 +00:00
galal-hussein
ab3f916d0b Update index.yaml
Signed-off-by: galal-hussein <galal-hussein@users.noreply.github.com>
2025-02-14 13:36:51 +00:00
galal-hussein
f78af9a0db add chart-chart-0.1.5-r1 to index.yaml 2025-01-23 19:04:26 +00:00
Hussein Galal
8b0383f35e Fix chart release action (#210)
* Fix chart release action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix chart release action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 21:02:34 +02:00
Enrico Candino
9e52c375a0 bump urfave/cli to v2 (#205) 2025-01-23 10:14:01 +01:00
Hussein Galal
ca8f30fd9e upgrade chart (#207)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 02:30:12 +02:00
Hussein Galal
931c7c5fcb Fix secret tokens and DNS translation (#200)
* Include init containers in token translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix kubernetes.defaul service DNS translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add skip test var to dapper

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubelet version and image pull policy to the shared agent

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 01:55:05 +02:00
Enrico Candino
fd6ed8184f removed antiaffinity (#199) 2025-01-22 18:34:30 +01:00
Enrico Candino
c285004944 fix release tag (#201) 2025-01-22 15:18:10 +01:00
Enrico Candino
b0aa22b2f4 Simplify Cluster spec (#193)
* removed some required parameters, adding defaults

* add hostVersion in Status field

* fix tests
2025-01-21 21:19:44 +01:00
Enrico Candino
3f49593f96 Add Cluster creation test (#192)
* added k3kcli to path

* test create cluster

* updated ptr

* added cluster creation test
2025-01-21 17:53:42 +01:00
Enrico Candino
0b3a5f250e Added golangci-lint action (#197)
* added golangci-lint action

* linters

* cleanup linters

* fix error, increase timeout

* removed unnecessary call to Stringer
2025-01-21 11:30:57 +01:00
Enrico Candino
e7671134d2 fixed missing version (#196) 2025-01-21 10:52:27 +01:00
Enrico Candino
f9b3d62413 bump go1.23 (#198) 2025-01-21 10:50:23 +01:00
Enrico Candino
d4368da9a0 E2E tests scaffolding (#189)
* testcontainers

add build script

dropped namespace from chart

upload logs

removed old tests

* show go.mod diffs
2025-01-16 20:40:53 +01:00
Hussein Galal
c93cdd0333 Add retry for k3k-kubelet provider functions (#188)
* Add retry for k3k kubelet provider functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add retry for k3k kubelet provider function

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* go mod tidy

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-16 21:34:28 +02:00
Enrico Candino
958d515a59 removed Namespace creation from charts, edited default (#190) 2025-01-16 18:34:17 +01:00
Hussein Galal
9d0c907df2 Fix downward api for status fields in k3k-kubelet (#185)
* Fix downward api for status fields in k3k-kubelet

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-16 02:40:17 +02:00
Enrico Candino
1691d48875 Fix for UpdatePod (#187)
* fix for UpdatePod

* removed print
2025-01-15 18:50:21 +01:00
Enrico Candino
960afe9504 fix error for existing webhook (#186) 2025-01-15 18:43:12 +01:00
Enrico Candino
349f54d627 fix for default priorityClasses (#182) 2025-01-14 20:30:16 +01:00
Hussein Galal
ccaa09fa4a Add PVC syncing support (#179)
* Add pvc syncing support

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-14 20:57:04 +02:00
Enrico Candino
f9ddec53b7 Added priorityClass to Clusters and ClusterSets (#180)
* added priorityClass to Clusters and ClusterSets

* fixed comment
2025-01-14 11:05:48 +01:00
Enrico Candino
5892121dbe Fix action event check on wrong field event_name (#177)
The event name should be checked against the `event_name` field.
2025-01-09 11:28:43 +01:00
Enrico Candino
524dc69b98 Fix for missing permission (#176) 2025-01-09 10:25:38 +01:00
Enrico Candino
4fdce5b1aa Test release workflows (#173)
* goreleaser action

* removed old release

* fix gomega version in tests

* updated build workflow

* fix for empty var
2025-01-09 10:10:53 +01:00
Enrico Candino
9fc4a57fc2 Fix go.mod (#171)
* check go mod

* fix go.mod
2025-01-08 10:02:23 +01:00
Enrico Candino
ee00b08927 Add real node resources to virtual node (#169)
* add real nodes capacity to virtual node

* distinguish capacity from allocatable node resources
2025-01-02 22:22:18 +01:00
Enrico Candino
7fdd48d577 Implementation of GetStatsSummary and GetMetricsResource for Virtual Kubelet (#163)
* implemented  GetStatsSummary and GetMetricsResource for Virtual Kubelet

* fixed ClusterRole for node proxy

* limit the clusterrole with get and list

* remove unused Metrics client interface
2024-12-27 11:41:40 +01:00
jpgouin
70a098df4c allow exec into pod and fetching log in shared mode (#160) 2024-12-17 11:41:17 +01:00
Hussein Galal
6739aa0382 Initial networking support for shared mode (#154)
* Initial networking support for shared mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix deletion logic and controller reference

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* golintci

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-12-10 23:22:55 +02:00
Enrico Candino
acd9d96732 fix timeout (#157) 2024-12-05 19:34:12 +01:00
Enrico Candino
72b2a5f1d1 Added podSecurityAdmissionLevel to ClusterSet (#145)
* added Namespace reconciliation for PodSecurity labels

* added Namespace Watch

* added tests, and example

* bump deps
2024-12-04 21:38:02 +01:00
Enrico Candino
8e7d0f43a9 changed cluster creation backoff (#156) 2024-12-04 20:44:43 +01:00
Enrico Candino
a235b85362 Bump testing dependencies (#155)
* fixed testing deps, added doc

* added manual dispatch
2024-12-04 20:31:33 +01:00
Enrico Candino
6d716e43b2 Bump deps and enable tests on PRs (#152)
* enable tests on PRs

* bump deps
2024-11-28 20:13:57 +01:00
Enrico Candino
6db5247ff7 fix netpol reconciliation (#150) 2024-11-28 01:44:37 +01:00
Enrico Candino
c561b033df Added allowedNodeTypes to ClusterSet, and fixed NetworkPolicy reconciliation (#144)
* updated CRDs

* added Mode to ClusterSet, and enum to CRD

* fix typos

* fix mode type in cli

* deletion of second clusterset in same namespace

* removed focused test, added clusterset example

* renamed modes

* added allowedNodeTypes, fixed samples

* fixed network policy reconciliation
2024-11-27 23:00:39 +02:00
Enrico Candino
37573d36a4 Added envtest integration tests for ClusterSet (#143)
* init tests

* added clusterset tests

* added github action

* updated Dapper with envtest bins
2024-11-11 18:13:20 +02:00
Hussein Galal
bc25c1c70a Serviceaccount token synchronization (#139)
* Serviceaccount token sync

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-11-08 00:11:56 +02:00
Enrico Candino
c9599963d1 added node selector to workloads (#138) 2024-11-06 21:50:51 +02:00
Hussein Galal
84f921641b Token random generation (#136)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-11-01 21:27:03 +02:00
Michael Bolot
26a7fa023f Adding basic volume syncing (#137)
* Adding basic volume syncing

Adds syncing for basic volume types (secret/configmap/projected secret
and configmap). Also changes the virtual kubelet to use a cache from
controller-runtime rather than a client for some operations.
2024-10-31 11:57:59 -05:00
Hussein Galal
7599d6946f Fix virtual node types (#135)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-10-24 23:56:17 +03:00
Hussein Galal
f04902f0a2 Add structured logging via zap (#133)
* Add structured logging properly

use a centralized logger wrapper to work with controller and virt-kubelet

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix some log messages

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-10-22 01:04:21 +03:00
Hussein Galal
d19f0f9ca6 virtual-kubelet controller integration (#130)
* Virtual kubelet controller integration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add k3k-kubelet image to the release workflow

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add k3k-kubelet image to the release workflow

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix build/release workflow

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Remove pkg directory in k3k-kubelet

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* rename Type to Config

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move the kubelet and config outside of pkg

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix naming throughout the package

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes to naming

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-10-21 22:54:08 +03:00
Hussein Galal
bf1fe2a71c Adding Networkpolicy to ClusterSets (#125)
* Adding cluster set types

Adds types for cluster sets, which allows constraining a few elements of
clusters including: overall resource usage, and which nodes it can use.

* Add networkpolicy to clustersets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix linting issues

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing node controller logic and nit fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix main cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Comment the resource quota for clustersets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Co-authored-by: Michael Bolot <michael.bolot@suse.com>
2024-10-16 00:27:42 +03:00
Hussein Galal
dbe6767aff Adding experimental disclaimer (#129)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-10-11 00:33:27 +03:00
Michael Bolot
ab33b3cb3f Adding poc for virtual kubelet (#112)
Adds a POC for running pods in the host cluster powered by virtual kubelet.
2024-10-01 00:33:10 +03:00
Michael Bolot
56da25941f Fixing bugs with namespaced clusters (#111)
Fixes a few bugs with namespaced clusters, specifically:
- The agent config still used a hardcoded value for the config secret
  mount
- The kubeconfig generation still used the old "cluster namespace" as
  the destination
In addition, changes the headless service name to not have two "-".
2024-09-06 02:15:36 +03:00
Michael Bolot
9faab4f82d Changing the cluster to be namespaced (#110)
* Changing the cluster to be namespaced

Changes the cluster type to be namespaced (and changes the various
controllers to work with this new feature). Also adds crd generation and
docs to the core cluster type.

* CI fix
2024-09-05 22:50:11 +03:00
Hussein Galal
bf72d39280 Use gh tool (#106)
* use gh tool instead of third party gh action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix checksum

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add GH_TOKEN env

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-06-21 23:56:43 +03:00
Hussein Galal
3879912b57 Move to Github Action (#105)
* Move to Github Action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move to Github Action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix code generation

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix GHA migration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix GHA migration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-05-21 00:00:47 +03:00
Hussein Galal
0d6bf4922a Fix code generation (#104)
* Fix code generation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update go

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-05-14 01:05:13 +03:00
Cuong Nguyen Duc
57c24f6f3c Correct file name (#95) 2024-03-18 08:46:59 +02:00
Hussein Galal
fe23607b71 Update chart to 0.1.4-r1 (#98)
* Update chart to 0.1.4-r1

* Update image to v0.2.1
2024-03-15 02:10:33 +02:00
Hussein Galal
caa0537d5e Renaming binaries and fix typo (#97) 2024-03-15 01:39:18 +02:00
Hussein Galal
0cad65e4fe Fix for readiness probe (#96)
* Fix for readiness probe

* update code generator code
2024-03-15 01:04:52 +02:00
Hussein Galal
cc914cf870 Update chart (#91)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-02-15 23:59:59 +02:00
Hussein Galal
ba35d12124 Cluster spec update (#90)
* Remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* enable cluster server and agent update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-25 06:37:59 +02:00
Hussein Galal
6fc22df6bc Cluster type validations (#89)
* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-12 23:09:30 +02:00
Hussein Galal
c92f722122 Add delete subcommand (#88)
* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 02:36:12 +02:00
Hussein Galal
5e141fe98e Add kubeconfig subcommand (#87)
* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 00:57:46 +02:00
Hussein Galal
4b2308e709 Update chart to v0.1.2-r1 (#82)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 07:38:54 +02:00
Hussein Galal
3cdcb04e1a Add validation for system cluster name for both controller and cli (#81)
* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 02:15:20 +02:00
Hussein Galal
fedfa109b5 Fix append to empty slice (#80)
* Fix append to empty slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix initialization of addresses slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-04 01:49:48 +02:00
Hussein Galal
99d043f2ee fix chart releases (#79)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:55:09 +02:00
Hussein Galal
57ed675a7f fix chart releases (#78)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:49:05 +02:00
Hussein Galal
7c9060c394 fix chart release (#77)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:37:08 +02:00
Hussein Galal
a104aacf5f Add github config mail and username for pushing k3k release (#76)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:24:46 +02:00
Hussein Galal
6346b06eb3 Add github config mail and username for pushing k3k release (#75)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:08:10 +02:00
Hussein Galal
6fd745f268 Fix chart release (#74)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 01:53:26 +02:00
Hussein Galal
1258fb6d58 Upgrade chart and fix manifest (#73)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 00:03:08 +02:00
Matt Trachier
130dbb0a33 Fix: upgrade go in go.mod (#64)
Signed-off-by: matttrach <matttrach@gmail.com>
2023-12-13 00:21:26 +02:00
Hussein Galal
67c8cac611 [controller] HA stabilization and fix rejoining ephermal nodes (#68)
* Remove etcd member if server pod gets removed

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make sure to add finalizer to server pod only in HA mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix recursion bug and add new fields to cluster status

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-12-13 00:21:02 +02:00
Hussein Galal
dd618e580a use statefulsets for servers (#67)
* use statefulsets for servers

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused code

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-11-28 23:51:50 +02:00
Paulo Gomes
dc2f410c17 build: Align drone base images (#66)
Align the base images used in drone with the images used across the
ecosystem.
2023-11-28 19:01:37 +02:00
Hussein Galal
a620f6c66f Fix kubeconfig extract in cli (#65)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-11-28 19:00:10 +02:00
Hussein Galal
3c283ce178 Add readiness probe and fix readme (#63)
* Add readiness probe and fix readme

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typos and fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-09-05 22:24:27 +03:00
Brian Downs
0dd234b2d5 Add Addon Feature (#61)
* resolve conflicts and other changes

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* updates

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* fix remaining conflict

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* add back cluster and service cidr

Signed-off-by: Brian Downs <brian.downs@gmail.com>

---------

Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-31 01:34:35 +03:00
Johnatas
986216f9cd Feat improve k3kcli os support (#62)
* Allowing multiple OSs

* add docs for windows

* Improve docs

* add manifests

* fix readme

* Update README.md

* Improve macos docs

* Change base build to go v1.20.7 and add freebsd
2023-08-31 01:27:48 +03:00
100 changed files with 11154 additions and 2060 deletions


@@ -1,137 +0,0 @@
---
kind: pipeline
name: amd64
platform:
os: linux
arch: amd64
steps:
- name: build
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
exclude:
- k3k-chart
- name: package-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper package-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: release-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper release-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: github_binary_release
image: ibuildthecloud/github-release:v0.0.1
settings:
api_key:
from_secret: github_token
prerelease: true
checksum:
- sha256
checksum_file: CHECKSUMsum-amd64.txt
checksum_flatten: true
files:
- "bin/*"
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
- name: docker-publish
image: plugins/docker
settings:
dockerfile: package/Dockerfile
password:
from_secret: docker_password
repo: "rancher/k3k"
username:
from_secret: docker_username
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
volumes:
- name: docker
host:
path: /var/run/docker.sock
---
kind: pipeline
type: docker
name: manifest
platform:
os: linux
arch: amd64
steps:
- name: push-runtime-manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: manifest-runtime.tmpl
when:
event:
- tag
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
branch:
exclude:
- k3k-chart
depends_on:
- amd64

.github/workflows/build.yml (new file, +34 lines)

@@ -0,0 +1,34 @@
name: Build
on:
push:
branches:
- main
pull_request:
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: v2
args: --clean --snapshot
env:
REPO: ${{ github.repository }}
REGISTRY:

.github/workflows/chart.yml (new file, +30 lines)

@@ -0,0 +1,30 @@
on:
push:
tags:
- "chart-*"
env:
GITHUB_TOKEN: ${{ github.token }}
name: Chart
permissions:
contents: write
id-token: write
jobs:
chart-release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Package Chart
run: |
make package-chart;
- name: Release Chart
run: |
gh release upload ${{ github.ref_name }} deploy/*
- name: Index Chart
run: |
make index-chart

.github/workflows/release-delete.yml (new file, +61 lines)

@@ -0,0 +1,61 @@
name: Release - Delete Draft
on:
workflow_dispatch:
inputs:
tag:
type: string
description: The tag of the release
permissions:
contents: write
packages: write
env:
GH_TOKEN: ${{ github.token }}
jobs:
release-delete:
runs-on: ubuntu-latest
steps:
- name: Check tag
if: inputs.tag == ''
run: echo "::error::Missing tag from input" && exit 1
- name: Checkout code
uses: actions/checkout@v4
- name: Check if release is draft
run: |
CURRENT_TAG=${{ inputs.tag }}
isDraft=$(gh release view ${CURRENT_TAG} --json isDraft --jq ".isDraft")
if [ "$isDraft" = true ]; then
echo "Release ${CURRENT_TAG} is draft"
else
echo "::error::Cannot delete non-draft release" && exit 1
fi
- name: Delete packages from Github Container Registry
run: |
CURRENT_TAG=${{ inputs.tag }}
echo "Deleting packages with tag ${CURRENT_TAG}"
JQ_QUERY=".[] | select(.metadata.container.tags[] == \"${CURRENT_TAG}\")"
for package in k3k k3k-kubelet
do
echo "Deleting ${package} image"
PACKAGE_TO_DELETE=$(gh api /user/packages/container/${package}/versions --jq "${JQ_QUERY}")
echo $PACKAGE_TO_DELETE | jq
PACKAGE_ID=$(echo $PACKAGE_TO_DELETE | jq .id)
echo "Deleting ${PACKAGE_ID}"
gh api --method DELETE /user/packages/container/${package}/versions/${PACKAGE_ID}
done
- name: Delete Github release
run: |
CURRENT_TAG=${{ inputs.tag }}
echo "Deleting release ${CURRENT_TAG}"
gh release delete ${CURRENT_TAG}

.github/workflows/release.yml (new file, +87 lines)

@@ -0,0 +1,87 @@
name: Release
on:
push:
tags:
- "v*"
workflow_dispatch:
inputs:
commit:
type: string
description: Checkout a specific commit
permissions:
contents: write
packages: write
id-token: write
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Checkout code at the specific commit
if: inputs.commit != ''
run: git checkout ${{ inputs.commit }}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
if: github.repository_owner == 'rancher'
with:
secrets: |
secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ;
secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD ;
# Manually dispatched workflows (or forks) will use ghcr.io
- name: Setup ghcr.io
if: github.event_name == 'workflow_dispatch' || github.repository_owner != 'rancher'
run: |
echo "REGISTRY=ghcr.io" >> $GITHUB_ENV
echo "DOCKER_USERNAME=${{ github.actor }}" >> $GITHUB_ENV
echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV
- name: Login to container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.DOCKER_USERNAME }}
password: ${{ env.DOCKER_PASSWORD }}
# If the tag does not exists the workflow was manually triggered.
# That means we are creating temporary nightly builds, with a "fake" local tag
- name: Check release tag
id: release-tag
run: |
CURRENT_TAG=$(git describe --tag --always --match="v[0-9]*")
if git show-ref --tags ${CURRENT_TAG} --quiet; then
echo "tag ${CURRENT_TAG} already exists";
else
echo "tag ${CURRENT_TAG} does not exist"
git tag ${CURRENT_TAG}
fi
echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: v2
args: --clean
env:
GITHUB_TOKEN: ${{ github.token }}
GORELEASER_CURRENT_TAG: ${{ steps.release-tag.outputs.CURRENT_TAG }}
REGISTRY: ${{ env.REGISTRY }}
REPO: ${{ github.repository }}

.github/workflows/test.yaml (new file, +101 lines)

@@ -0,0 +1,101 @@
name: Tests
on:
push:
pull_request:
workflow_dispatch:
permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
args: --timeout=5m
version: v1.60
tests:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Check go modules
run: |
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"
- name: Install tools
run: |
go install github.com/onsi/ginkgo/v2/ginkgo
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
- name: Run tests
run: ginkgo -v -r --skip-file=tests
tests-e2e:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Check go modules
run: |
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Build
run: |
./scripts/build
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Check k3kcli
run: k3kcli -v
- name: Run tests
run: ginkgo -v ./tests
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3s-logs
path: /tmp/k3s.log

.gitignore (4 changed lines)

@@ -4,4 +4,6 @@
/dist
*.swp
.idea
.vscode/
__debug*
*-kubeconfig.yaml

.golangci.yml (new file, +9 lines)

@@ -0,0 +1,9 @@
linters:
enable:
# default linters
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused

.goreleaser.yaml (new file, +99 lines)

@@ -0,0 +1,99 @@
version: 2
release:
draft: true
replace_existing_draft: true
prerelease: auto
before:
hooks:
- go mod tidy
- go generate ./...
builds:
- id: k3k
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- "amd64"
- "arm64"
- "s390x"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
- id: k3k-kubelet
main: ./k3k-kubelet
binary: k3k-kubelet
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- "amd64"
- "arm64"
- "s390x"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
- id: k3kcli
main: ./cli
binary: k3kcli
env:
- CGO_ENABLED=0
goarch:
- "amd64"
- "arm64"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
archives:
- format: binary
name_template: >-
{{ .Binary }}-{{- .Os }}-{{ .Arch }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
format_overrides:
- goos: windows
format: zip
# For the image_templates we are using the following expression to build images for the correct registry
# {{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}
#
# REGISTRY= -> rancher/k3k:vX.Y.Z
# REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:latest:vX.Y.Z
#
dockers:
- id: k3k
use: docker
ids:
- k3k
- k3kcli
dockerfile: "package/Dockerfile"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
build_flag_templates:
- "--build-arg=BIN_K3K=k3k"
- "--build-arg=BIN_K3KCLI=k3kcli"
- id: k3k-kubelet
use: docker
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.kubelet"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
build_flag_templates:
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"


@@ -1,4 +1,4 @@
ARG GOLANG=rancher/hardened-build-base:v1.20.6b2
ARG GOLANG=rancher/hardened-build-base:v1.23.4b1
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
@@ -6,16 +6,26 @@ ENV ARCH $DAPPER_HOST_ARCH
RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.59.0; \
fi
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/
# Tool for CRD generation.
ENV CONTROLLER_GEN_VERSION v0.14.0
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_GEN_VERSION}
# Tool to setup the envtest framework to run the controllers integration tests
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
ENVTEST_BIN=$(setup-envtest use -p path) && \
mkdir -p /usr/local/kubebuilder/bin && \
cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy
ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}


@@ -1,5 +1,4 @@
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
@@ -12,4 +11,4 @@ $(TARGETS): .dapper
.DEFAULT_GOAL := default
.PHONY: $(TARGETS)
.PHONY: $(TARGETS)


@@ -1,18 +1,60 @@
# K3K
[![Experimental](https://img.shields.io/badge/status-experimental-orange.svg)](https://shields.io/)
A Kubernetes-in-Kubernetes tool: k3k provides a way to run multiple embedded, isolated K3s clusters on your Kubernetes cluster.
**Experimental Tool**
This project is still under development and is considered experimental. It may have limitations, bugs, or breaking changes. Please use it with caution and report any issues you encounter. We appreciate your feedback as we continue to refine and improve this tool.
## Example
An example on creating a k3k cluster on an RKE2 host using k3kcli
[![asciicast](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp.svg)](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)
## Usage
## Architecture
K3K consists of a controller and a CLI tool; the controller can be deployed via a Helm chart, and the CLI can be downloaded from the releases page.
### Deploy Controller
### Controller
The K3K controller watches a CRD called `clusters.k3k.io`. When a Cluster object is found, the controller creates a separate namespace and provisions a K3s cluster as specified in the object's spec.
Each server and agent is created as a separate pod that runs in the new namespace.
### CLI
The CLI provides a quick and easy way to create K3K clusters using simple flags, and it automatically exposes the K3K clusters so they are accessible via a kubeconfig.
## Features
### Isolation
Each cluster runs in a separate namespace that can be isolated via network policies and RBAC rules; clusters also run in a separate network namespace with flannel as the backend CNI. Finally, each cluster has a separate datastore which can be persisted.
In addition, k3k offers a persistence feature that helps users persist their datastore using dynamic storage class volumes.
### Portability and Customization
The "Cluster" object is considered the template of the cluster that you can re-use to spin up multiple clusters in a matter of seconds.
K3K clusters use K3S internally and leverage all options that can be passed to K3S. Each cluster is exposed to the host cluster via NodePort, LoadBalancers, and Ingresses.
| | Separate Namespace (for each tenant) | K3K | vcluster | Separate Cluster (for each tenant) |
|-----------------------|---------------------------------------|------------------------------|-----------------|------------------------------------|
| Isolation | Very weak | Very strong | strong | Very strong |
| Access for tenants | Very restricted | Built-in k8s RBAC / Rancher | Vcluster admin | Cluster admin |
| Cost | Very cheap | Very cheap | cheap | expensive |
| Overhead | Very low | Very low | Very low | Very high |
| Networking | Shared | Separate | shared | separate |
| Cluster Configuration | | Very easy | Very hard | |
## Usage
### Deploy K3K Controller
[Helm](https://helm.sh) must be installed to use the charts. Please refer to
Helm's [documentation](https://helm.sh/docs) to get started.
@@ -45,14 +87,57 @@ helm delete my-k3k
To create a new cluster you need to install and run the CLI or create a Cluster object (see the example manifest after the CLI command below); to install the CLI:
```sh
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha6/k3kcli
#### For Linux and macOS
1 - Download the binary. Linux download URL:
```
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
macOS download URL:
```
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
Then copy to local bin
```
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```
#### For Windows
1 - Download the Binary:
Use PowerShell's Invoke-WebRequest cmdlet to download the binary:
```powershell
Invoke-WebRequest -Uri "https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli-windows" -OutFile "k3kcli.exe"
```
2 - Copy the Binary to a Directory in PATH:
To allow running the binary from any command prompt, you can copy it to a directory in your system's PATH. For example, copying it to C:\Users\<YourUsername>\bin (create this directory if it doesn't exist):
```
Copy-Item "k3kcli.exe" "C:\bin"
```
3 - Update Environment Variable (PATH):
If you haven't already added `C:\bin` (or your chosen directory) to your PATH, you can do it through PowerShell:
```
setx PATH "C:\bin;%PATH%"
```
To create a new cluster you can use:
```sh
k3k cluster create --name example-cluster --token test
```
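As noted above, a cluster can also be created by applying a Cluster object instead of using the CLI. The following is a minimal, purely illustrative sketch based on the `clusters.k3k.io` CRD included later in this diff; the name and namespace are placeholders, and omitted fields fall back to the CRD defaults (1 server, 0 agents, shared mode):
```yaml
# Illustrative only: a minimal Cluster resource for the k3k controller.
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: example-cluster      # placeholder name
  namespace: default         # Cluster is a namespaced resource in this version
spec:
  servers: 1                 # K3s pods running in server (control-plane) mode
  agents: 0                  # K3s pods running in agent (worker) mode
  mode: shared               # "shared" or "virtual"; defaults to "shared"
```
Applying this with `kubectl apply -f` should have the controller provision the server and agent pods described above.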
## Tests
To run the tests we use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
Install the required binaries from `envtest` with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), and then put them in the default path `/usr/local/kubebuilder/bin`:
```
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
```
then run `ginkgo run ./...`.


@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.0-r1
appVersion: 0.0.0-alpha6
version: 0.1.5-r1
appVersion: v0.2.2-rc4


@@ -1,93 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusters.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
name:
type: string
version:
type: string
servers:
type: integer
agents:
type: integer
token:
type: string
clusterCIDR:
type: string
serviceCIDR:
type: string
clusterDNS:
type: string
serverArgs:
type: array
items:
type: string
agentArgs:
type: array
items:
type: string
tlsSANs:
type: array
items:
type: string
persistence:
type: object
properties:
type:
type: string
default: "ephermal"
storageClassName:
type: string
storageRequestSize:
type: string
expose:
type: object
properties:
ingress:
type: object
properties:
enabled:
type: boolean
ingressClassName:
type: string
loadbalancer:
type: object
properties:
enabled:
type: boolean
nodePort:
type: object
properties:
enabled:
type: boolean
status:
type: object
properties:
overrideClusterCIDR:
type: boolean
clusterCIDR:
type: string
overrideServiceCIDR:
type: boolean
serviceCIDR:
type: string
clusterDNS:
type: string
scope: Cluster
names:
plural: clusters
singular: cluster
kind: Cluster


@@ -0,0 +1,257 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: clusters.k3k.io
spec:
group: k3k.io
names:
kind: Cluster
listKind: ClusterList
plural: clusters
singular: cluster
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
default: {}
properties:
addons:
description: Addons is a list of secrets containing raw YAML which
will be deployed in the virtual K3k cluster on startup.
items:
properties:
secretNamespace:
type: string
secretRef:
type: string
type: object
type: array
agentArgs:
description: AgentArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in agent mode.
items:
type: string
type: array
agents:
default: 0
description: Agents is the number of K3s pods to run in agent (worker)
mode.
format: int32
type: integer
x-kubernetes-validations:
- message: invalid value for agents
rule: self >= 0
clusterCIDR:
description: ClusterCIDR is the CIDR range for the pods of the cluster.
Defaults to 10.42.0.0/16.
type: string
x-kubernetes-validations:
- message: clusterCIDR is immutable
rule: self == oldSelf
clusterDNS:
description: |-
ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
Defaults to 10.43.0.10.
type: string
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
clusterLimit:
description: Limit is the limits that apply for the server/worker
nodes.
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
type: object
type: object
expose:
description: |-
Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
clusterIP which is relatively secure, but difficult to access outside of the cluster.
properties:
ingress:
properties:
enabled:
type: boolean
ingressClassName:
type: string
type: object
loadbalancer:
properties:
enabled:
type: boolean
required:
- enabled
type: object
nodePort:
properties:
enabled:
type: boolean
required:
- enabled
type: object
type: object
mode:
allOf:
- enum:
- shared
- virtual
- enum:
- shared
- virtual
default: shared
description: Mode is the cluster provisioning mode which can be either
"shared" or "virtual". Defaults to "shared"
type: string
x-kubernetes-validations:
- message: mode is immutable
rule: self == oldSelf
nodeSelector:
additionalProperties:
type: string
description: |-
NodeSelector is the node selector that will be applied to all server/agent pods.
In "shared" mode the node selector will be applied also to the workloads.
type: object
persistence:
description: |-
Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
properties:
storageClassName:
type: string
storageRequestSize:
type: string
type:
default: ephemeral
description: Type can be ephermal, static, dynamic
type: string
required:
- type
type: object
priorityClass:
description: |-
PriorityClass is the priorityClassName that will be applied to all server/agent pods.
In "shared" mode the priorityClassName will be applied also to the workloads.
type: string
serverArgs:
description: ServerArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in server mode.
items:
type: string
type: array
servers:
default: 1
description: Servers is the number of K3s pods to run in server (controlplane)
mode.
format: int32
type: integer
x-kubernetes-validations:
- message: cluster must have at least one server
rule: self >= 1
serviceCIDR:
description: ServiceCIDR is the CIDR range for the services in the
cluster. Defaults to 10.43.0.0/16.
type: string
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
tlsSANs:
description: TLSSANs are the subjectAlternativeNames for the certificate
the K3s server will use.
items:
type: string
type: array
tokenSecretRef:
description: |-
TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
properties:
name:
description: name is unique within a namespace to reference a
secret resource.
type: string
namespace:
description: namespace defines the space within which the secret
name must be unique.
type: string
type: object
x-kubernetes-map-type: atomic
version:
description: Version is a string representing the Kubernetes version
to be used by the virtual nodes.
type: string
type: object
status:
properties:
clusterCIDR:
type: string
clusterDNS:
type: string
hostVersion:
type: string
persistence:
properties:
storageClassName:
type: string
storageRequestSize:
type: string
type:
default: ephemeral
description: Type can be ephermal, static, dynamic
type: string
required:
- type
type: object
serviceCIDR:
type: string
tlsSANs:
items:
type: string
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
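As a concrete (and purely illustrative) instance of the schema above, the sketch below exercises the documented `tokenSecretRef`, `persistence`, and `expose` fields; the secret name, storage class, ingress class, and version string are assumptions, not values taken from this change set:
```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: tenant-a
  namespace: tenant-a
spec:
  servers: 3                      # must be >= 1 per the CRD validation
  agents: 2                       # must be >= 0
  mode: virtual                   # immutable once set ("mode is immutable")
  version: v1.31.3+k3s1           # assumed K3s version string
  tokenSecretRef:                 # the Secret is expected to carry a "token" key in its data
    name: tenant-a-token          # assumed Secret name
    namespace: tenant-a
  persistence:
    type: dynamic                 # ephemeral | static | dynamic
    storageClassName: local-path  # assumed StorageClass
    storageRequestSize: 2Gi
  expose:
    ingress:
      enabled: true
      ingressClassName: nginx     # assumed ingress class
```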


@@ -0,0 +1,210 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: clustersets.k3k.io
spec:
group: k3k.io
names:
kind: ClusterSet
listKind: ClusterSetList
plural: clustersets
singular: clusterset
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
default: {}
description: Spec is the spec of the ClusterSet
properties:
allowedNodeTypes:
default:
- shared
description: AllowedNodeTypes are the allowed cluster provisioning
modes. Defaults to [shared].
items:
description: ClusterMode is the possible provisioning mode of a
Cluster.
enum:
- shared
- virtual
type: string
minItems: 1
type: array
x-kubernetes-validations:
- message: mode is immutable
rule: self == oldSelf
defaultLimits:
description: DefaultLimits are the limits used for servers/agents
when a cluster in the set doesn't provide any
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
type: object
type: object
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector is the node selector that applies
to all clusters (server + agent) in the set
type: object
defaultPriorityClass:
description: DefaultPriorityClass is the priorityClassName applied
to all pods of all clusters in the set
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy is an option that will disable the
creation of a default networkpolicy for cluster isolation
type: boolean
maxLimits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimits are the limits that apply to all clusters (server
+ agent) in the set
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel is the policy level applied
to the pods in the namespace.
enum:
- privileged
- baseline
- restricted
type: string
type: object
status:
description: Status is the status of the ClusterSet
properties:
conditions:
description: Conditions are the invidual conditions for the cluster
set
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
direct use as an array at the field path .status.conditions. For
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
observations of a foo's current state.\n\t // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
\ // other fields\n\t}"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
---
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
useful (see .node.status.conditions), the ability to deconflict is important.
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
status was updated.
format: int64
type: integer
summary:
description: Summary is a summary of the status
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
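And a hedged ClusterSet sketch built only from the fields documented above; the namespace, limits, and PriorityClass name are illustrative assumptions:
```yaml
apiVersion: k3k.io/v1alpha1
kind: ClusterSet
metadata:
  name: tenants
  namespace: tenants                   # ClusterSet is namespaced
spec:
  allowedNodeTypes:                    # provisioning modes clusters in the set may use
    - shared
    - virtual
  podSecurityAdmissionLevel: baseline  # privileged | baseline | restricted
  defaultPriorityClass: k3k-tenant     # assumed PriorityClass name
  defaultNodeSelector:
    kubernetes.io/os: linux
  defaultLimits:                       # used when a cluster doesn't set its own limits
    serverLimit:
      cpu: "1"
      memory: 1Gi
    workerLimit:
      cpu: 500m
      memory: 512Mi
  disableNetworkPolicy: false          # keep the default isolation NetworkPolicy
```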


@@ -4,7 +4,7 @@ metadata:
name: {{ include "k3k.fullname" . }}
labels:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.image.replicaCount }}
selector:
@@ -16,11 +16,21 @@ spec:
{{- include "k3k.selectorLabels" . | nindent 8 }}
spec:
containers:
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: {{ .Chart.Name }}
env:
- name: CLUSTER_CIDR
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
ports:
- containerPort: 8080
name: https
protocol: TCP
serviceAccountName: {{ include "k3k.serviceAccountName" . }}
- containerPort: 9443
name: https-webhook
protocol: TCP
serviceAccountName: {{ include "k3k.serviceAccountName" . }}


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Values.namespace }}


@@ -11,4 +11,27 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "k3k.serviceAccountName" . }}
namespace: {{ .Values.namespace }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
rules:
- apiGroups:
- ""
resources:
- "nodes"
- "nodes/proxy"
verbs:
- "get"
- "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "k3k.fullname" . }}-node-proxy
roleRef:
kind: ClusterRole
name: {{ include "k3k.fullname" . }}-node-proxy
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: k3k-webhook
labels:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 443
protocol: TCP
name: https-webhook
targetPort: 9443
selector:
{{- include "k3k.selectorLabels" . | nindent 6 }}


@@ -5,5 +5,5 @@ metadata:
name: {{ include "k3k.serviceAccountName" . }}
labels:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Values.namespace }}
{{- end }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -1,19 +1,29 @@
replicaCount: 1
namespace: k3k-system
image:
repository: rancher/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.0.0-alpha6"
tag: ""
pullPolicy: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
host:
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy for clustersets; if not set,
# the controller will collect the PodCIDRs of all the nodes on the system.
clusterCIDR: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# configuration related to the shared agent mode in k3k
sharedAgent:
image:
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""


@@ -2,24 +2,28 @@ package cluster
import (
"github.com/rancher/k3k/cli/cmds"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
)
var clusterSubcommands = []cli.Command{
var subcommands = []*cli.Command{
{
Name: "create",
Usage: "Create new cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: createCluster,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
Name: "create",
Usage: "Create new cluster",
Action: create,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
},
{
Name: "delete",
Usage: "Delete an existing cluster",
Action: delete,
Flags: append(cmds.CommonFlags, clusterDeleteFlags...),
},
}
func NewClusterCommand() cli.Command {
return cli.Command{
func NewCommand() *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: clusterSubcommands,
Subcommands: subcommands,
}
}


@@ -3,7 +3,6 @@ package cluster
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
@@ -12,33 +11,25 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
Scheme = runtime.NewScheme()
backoff = wait.Backoff{
Steps: 5,
Duration: 3 * time.Second,
Factor: 2,
Jitter: 0.1,
}
)
var Scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
@@ -57,72 +48,79 @@ var (
persistenceType string
storageClassName string
version string
mode string
clusterCreateFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,
},
cli.Int64Flag{
&cli.Int64Flag{
Name: "servers",
Usage: "number of servers",
Destination: &servers,
Value: 1,
},
cli.Int64Flag{
&cli.Int64Flag{
Name: "agents",
Usage: "number of agents",
Destination: &agents,
},
cli.StringFlag{
&cli.StringFlag{
Name: "token",
Usage: "token of the cluster",
Destination: &token,
},
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-cidr",
Usage: "cluster CIDR",
Destination: &clusterCIDR,
},
cli.StringFlag{
&cli.StringFlag{
Name: "service-cidr",
Usage: "service CIDR",
Destination: &serviceCIDR,
},
cli.StringFlag{
&cli.StringFlag{
Name: "persistence-type",
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
Value: cluster.EphermalNodesType,
Value: server.EphermalNodesType,
Destination: &persistenceType,
},
cli.StringFlag{
&cli.StringFlag{
Name: "storage-class-name",
Usage: "Storage class name for dynamic persistence type",
Destination: &storageClassName,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Value: &serverArgs,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Value: &agentArgs,
},
cli.StringFlag{
&cli.StringFlag{
Name: "version",
Usage: "k3s version",
Destination: &version,
Value: "v1.26.1-k3s1",
},
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type",
Destination: &mode,
Value: "shared",
},
}
)
func createCluster(clx *cli.Context) error {
func create(clx *cli.Context) error {
ctx := context.Background()
if err := validateCreateFlags(clx); err != nil {
if err := validateCreateFlags(); err != nil {
return err
}
@@ -138,16 +136,25 @@ func createCluster(clx *cli.Context) error {
if err != nil {
return err
}
if token != "" {
logrus.Infof("Creating cluster token secret")
obj := k3kcluster.TokenSecretObj(token, name, cmds.Namespace())
if err := ctrlClient.Create(ctx, &obj); err != nil {
return err
}
}
logrus.Infof("Creating a new cluster [%s]", name)
cluster := newCluster(
name,
cmds.Namespace(),
mode,
token,
int32(servers),
int32(agents),
clusterCIDR,
serviceCIDR,
serverArgs,
agentArgs,
serverArgs.Value(),
agentArgs.Value(),
)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
@@ -173,13 +180,25 @@ func createCluster(clx *cli.Context) error {
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
cfg := &kubeconfig.KubeConfig{
CN: controller.AdminCommonName,
ORG: []string{user.SystemPrivilegedGroup},
ExpiryDate: 0,
}
logrus.Infof("waiting for cluster to be available..")
// retry every 5s for at most 2m, or 25 times
availableBackoff := wait.Backoff{
Duration: 5 * time.Second,
Cap: 2 * time.Minute,
Steps: 25,
}
var kubeconfig []byte
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
if err != nil {
return err
}
return nil
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
return err
}); err != nil {
return err
}
@@ -198,39 +217,41 @@ func createCluster(clx *cli.Context) error {
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
}
func validateCreateFlags(clx *cli.Context) error {
if persistenceType != cluster.EphermalNodesType &&
persistenceType != cluster.DynamicNodesType {
func validateCreateFlags() error {
if persistenceType != server.EphermalNodesType &&
persistenceType != server.DynamicNodesType {
return errors.New("invalid persistence type")
}
if token == "" {
return errors.New("empty cluster token")
}
if name == "" {
return errors.New("empty cluster name")
}
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if servers <= 0 {
return errors.New("invalid number of servers")
}
if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
return errors.New("empty kubeconfig")
}
if mode != "shared" && mode != "virtual" {
return errors.New(`mode should be one of "shared" or "virtual"`)
}
return nil
}
func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceCIDR string, serverArgs, agentArgs []string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
func newCluster(name, namespace, mode, token string, servers, agents int32, clusterCIDR, serviceCIDR string, serverArgs, agentArgs []string) *v1alpha1.Cluster {
cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "k3k.io/v1alpha1",
},
Spec: v1alpha1.ClusterSpec{
Name: name,
Token: token,
Servers: &servers,
Agents: &agents,
ClusterCIDR: clusterCIDR,
@@ -238,90 +259,18 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
ServerArgs: serverArgs,
AgentArgs: agentArgs,
Version: version,
Mode: v1alpha1.ClusterMode(mode),
Persistence: &v1alpha1.PersistenceConfig{
Type: persistenceType,
StorageClassName: storageClassName,
},
},
}
}
func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, serverIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
}
var kubeSecret v1.Secret
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
return nil, err
}
kubeconfig := kubeSecret.Data["kubeconfig.yaml"]
if kubeconfig == nil {
return nil, errors.New("empty kubeconfig")
}
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
if err != nil {
return nil, err
if token != "" {
cluster.Spec.TokenSecretRef = &v1.SecretReference{
Name: k3kcluster.TokenSecretName(name),
Namespace: namespace,
}
hostURL := fmt.Sprintf("https://%s:%d", serverIP, nodePort)
restConfig.Host = hostURL
clientConfig := generateKubeconfigFromRest(restConfig)
b, err := clientcmd.Write(clientConfig)
if err != nil {
return nil, err
}
kubeconfig = b
}
return kubeconfig, nil
}
func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
clusters := make(map[string]*clientcmdapi.Cluster)
clusters["default-cluster"] = &clientcmdapi.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
}
contexts := make(map[string]*clientcmdapi.Context)
contexts["default-context"] = &clientcmdapi.Context{
Cluster: "default-cluster",
Namespace: "default",
AuthInfo: "default",
}
authinfos := make(map[string]*clientcmdapi.AuthInfo)
authinfos["default"] = &clientcmdapi.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
}
clientConfig := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: clusters,
Contexts: contexts,
CurrentContext: "default-context",
AuthInfos: authinfos,
}
return clientConfig
return cluster
}
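
To make the refactor above easier to follow, here is a minimal sketch of the Cluster object that `newCluster` now assembles, with the token referenced through a Secret instead of being embedded in the spec. All names and values below are placeholders, not part of the diff.

```go
package main

import (
	"fmt"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	servers, agents := int32(1), int32(0)

	// Mirrors what newCluster() builds from the CLI flags.
	cluster := &v1alpha1.Cluster{
		TypeMeta:   metav1.TypeMeta{Kind: "Cluster", APIVersion: "k3k.io/v1alpha1"},
		ObjectMeta: metav1.ObjectMeta{Name: "example1", Namespace: "default"},
		Spec: v1alpha1.ClusterSpec{
			Mode:    v1alpha1.ClusterMode("shared"),
			Servers: &servers,
			Agents:  &agents,
			Version: "v1.26.1-k3s1",
		},
	}

	// With the new flow, a user-supplied token is stored in a Secret first and
	// referenced from the spec rather than passed inline.
	cluster.Spec.TokenSecretRef = &v1.SecretReference{
		Name:      "example1-token", // placeholder; the CLI derives it via k3kcluster.TokenSecretName()
		Namespace: "default",
	}

	fmt.Println("would create cluster", cluster.Namespace+"/"+cluster.Name)
}
```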


@@ -1 +1,48 @@
package cluster
import (
"context"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
clusterDeleteFlags = []cli.Flag{
&cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,
},
}
)
func delete(clx *cli.Context) error {
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("deleting [%s] cluster", name)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: cmds.Namespace(),
},
}
return ctrlClient.Delete(ctx, &cluster)
}


@@ -0,0 +1,164 @@
package kubeconfig
import (
"context"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
var (
Scheme = runtime.NewScheme()
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
generateKubeconfigFlags = []cli.Flag{
&cli.StringFlag{
Name: "name",
Usage: "cluster name",
Destination: &name,
},
&cli.StringFlag{
Name: "config-name",
Usage: "the name of the generated kubeconfig file",
Destination: &configName,
},
&cli.StringFlag{
Name: "cn",
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
Destination: &cn,
Value: controller.AdminCommonName,
},
&cli.StringSliceFlag{
Name: "org",
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
Value: &org,
},
&cli.StringSliceFlag{
Name: "altNames",
Usage: "altNames of the generated certificates for the kubeconfig",
Value: &altNames,
},
&cli.Int64Flag{
Name: "expiration-days",
Usage: "Expiration date of the certificates used for the kubeconfig",
Destination: &expirationDays,
Value: 356,
},
}
)
var subcommands = []*cli.Command{
{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate,
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
},
}
func NewCommand() *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: subcommands,
}
}
func generate(clx *cli.Context) error {
var cluster v1alpha1.Cluster
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
clusterKey := types.NamespacedName{
Name: name,
Namespace: cmds.Namespace(),
}
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
certAltNames := certs.AddSANs(altNames.Value())
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig []byte
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
if err != nil {
return err
}
return nil
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
if configName == "" {
configName = cluster.Name + "-kubeconfig.yaml"
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, configName))
return os.WriteFile(configName, kubeconfig, 0644)
}
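
The same `kubeconfig.KubeConfig` helper can also be driven directly from Go, outside the CLI. The sketch below assumes a controller-runtime client built with the same scheme as above; the kubeconfig path, cluster key, and server host are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	ctx := context.Background()

	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = v1alpha1.AddToScheme(scheme)

	restConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/host-kubeconfig")
	if err != nil {
		panic(err)
	}
	ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	var cluster v1alpha1.Cluster
	if err := ctrlClient.Get(ctx, types.NamespacedName{Name: "example1", Namespace: "default"}, &cluster); err != nil {
		panic(err)
	}

	// Same fields the generate command fills in; retry.OnError is omitted here
	// for brevity, while the CLI wraps Extract in it until the cluster is up.
	cfg := kubeconfig.KubeConfig{
		CN:         controller.AdminCommonName,
		ORG:        []string{user.SystemPrivilegedGroup},
		ExpiryDate: 365 * 24 * time.Hour,
	}
	kc, err := cfg.Extract(ctx, ctrlClient, &cluster, "10.0.0.1")
	if err != nil {
		panic(err)
	}
	fmt.Printf("extracted %d bytes of kubeconfig\n", len(kc))
}
```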


@@ -2,19 +2,29 @@ package cmds
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
)
const (
defaultNamespace = "default"
)
var (
debug bool
Kubeconfig string
namespace string
CommonFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVar: "KUBECONFIG",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &Kubeconfig,
},
&cli.StringFlag{
Name: "namespace",
Usage: "Namespace to create the k3k cluster in",
Destination: &namespace,
},
}
)
@@ -23,11 +33,11 @@ func NewApp() *cli.App {
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = []cli.Flag{
cli.BoolFlag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &debug,
EnvVar: "K3K_DEBUG",
EnvVars: []string{"K3K_DEBUG"},
},
}
@@ -40,3 +50,10 @@ func NewApp() *cli.App {
return app
}
func Namespace() string {
if namespace == "" {
return defaultNamespace
}
return namespace
}


@@ -1,26 +1,28 @@
package main
import (
"fmt"
"os"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
program = "k3k"
version = "dev"
gitCommit = "HEAD"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
cluster.NewClusterCommand(),
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
app.Commands = []*cli.Command{
cluster.NewCommand(),
kubeconfig.NewCommand(),
}
app.Version = version + " (" + gitCommit + ")"
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)

examples/clusterset.yaml Normal file

@@ -0,0 +1,11 @@
apiVersion: k3k.io/v1alpha1
kind: ClusterSet
metadata:
name: clusterset-example
# spec:
# disableNetworkPolicy: false
# allowedNodeTypes:
# - "shared"
# - "virtual"
# podSecurityAdmissionLevel: "baseline"
# defaultPriorityClass: "lowpriority"


@@ -3,6 +3,7 @@ kind: Cluster
metadata:
name: example1
spec:
mode: "shared"
servers: 1
agents: 3
token: test


@@ -3,6 +3,7 @@ kind: Cluster
metadata:
name: single-server
spec:
mode: "shared"
servers: 1
agents: 3
token: test

go.mod

@@ -1,74 +1,217 @@
module github.com/rancher/k3k
go 1.19
go 1.23.4
require (
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.12
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.26.1
k8s.io/klog v1.0.0
replace (
github.com/google/cel-go => github.com/google/cel-go v0.17.7
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
github.com/prometheus/common => github.com/prometheus/common v0.47.0
golang.org/x/term => golang.org/x/term v0.15.0
)
require (
github.com/go-logr/zapr v1.3.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/prometheus/client_model v0.6.1
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.0
go.etcd.io/etcd/api/v3 v3.5.14
go.etcd.io/etcd/client/v3 v3.5.14
go.uber.org/zap v1.26.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.29.11
k8s.io/apimachinery v0.29.11
k8s.io/apiserver v0.29.11
k8s.io/client-go v0.29.11
k8s.io/component-base v0.29.11
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.17.5
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/time v0.3.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.0 // indirect
k8s.io/component-base v0.26.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
require (
github.com/go-logr/logr v1.2.3 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/containerd v1.7.24 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v27.1.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/rancher/dynamiclistener v0.3.5
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/text v0.5.0 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiserver v0.26.1
k8s.io/klog/v2 v2.80.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.1
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.11 // indirect
k8s.io/cli-runtime v0.29.11 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.30.3 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/kubectl v0.29.11 // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.4 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

go.sum

File diff suppressed because it is too large

Binary file not shown (image, 137 KiB)

@@ -4,18 +4,25 @@ set -o errexit
set -o nounset
set -o pipefail
set -x
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
K8S_VERSION=$(cat go.mod | grep -m1 "k8s.io/apiserver" | cut -d " " -f 2)
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=./code-generator
"${CODEGEN_PKG}/generate-groups.sh" \
"deepcopy" \
github.com/rancher/k3k/pkg/generated \
github.com/rancher/k3k/pkg/apis \
"k3k.io:v1alpha1" \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../../.."
# cd into the git dir to checkout the code gen version compatible with the k8s version that this is using
cd $CODEGEN_PKG
git fetch origin tag ${K8S_VERSION}
git checkout ${K8S_VERSION}
cd -
source ${CODEGEN_PKG}/kube_codegen.sh
kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
--input-pkg-root "${SCRIPT_ROOT}/pkg/apis" \
--output-base "${SCRIPT_ROOT}/pkg/apis"
rm -rf code-generator

index.yaml Normal file

@@ -0,0 +1,334 @@
apiVersion: v1
entries:
k3k:
- apiVersion: v2
appVersion: v1.0.2-rc2
created: "2026-01-29T08:56:00.013920706Z"
description: A Helm chart for K3K
digest: 22fe9e44b4d3e2c61d2343f450949f87056b069faabf3dc8eddef49a6319c4ff
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.2-rc2/k3k-1.0.2-rc2.tgz
version: 1.0.2-rc2
- apiVersion: v2
appVersion: v1.0.2-rc1
created: "2026-01-15T13:14:48.456179859Z"
description: A Helm chart for K3K
digest: d8876d2a3de38d0e2e274034a71746ddfa40f5fef2a395d84e1e8396339bd725
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.2-rc1/k3k-1.0.2-rc1.tgz
version: 1.0.2-rc1
- apiVersion: v2
appVersion: v1.0.1
created: "2025-12-09T14:41:59.654224073Z"
description: A Helm chart for K3K
digest: 47d5318a4e9d60192fe6950ad111f7820cbcc39c79a85ceacd9f0c25dc5366a4
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.1/k3k-1.0.1.tgz
version: 1.0.1
- apiVersion: v2
appVersion: v1.0.1-rc2
created: "2025-12-03T14:02:05.472877082Z"
description: A Helm chart for K3K
digest: 2a7d5d915b5a0bc0f1db62bb2fb922daa0e5b61585b1856dfe64ea1527ad214e
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.1-rc2/k3k-1.0.1-rc2.tgz
version: 1.0.1-rc2
- apiVersion: v2
appVersion: v1.0.1-rc1
created: "2025-11-17T17:25:25.106204732Z"
description: A Helm chart for K3K
digest: 4b3dde184a01c4555a52af1b7a6d78ef9402e00b7630dc2d7ff6d1cc5d4dc163
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.1-rc1/k3k-1.0.1-rc1.tgz
version: 1.0.1-rc1
- apiVersion: v2
appVersion: v1.0.0
created: "2025-11-03T15:44:38.393518232Z"
description: A Helm chart for K3K
digest: 173d90bfe6d2b60af590c29090c4c32290edcbe0998c048f6d59a36460ac3e0b
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.0/k3k-1.0.0.tgz
version: 1.0.0
- apiVersion: v2
appVersion: v1.0.0-rc3
created: "2025-10-31T16:01:53.317011317Z"
description: A Helm chart for K3K
digest: 88c034e940e4714d073e16a9686c81de873cb9358146bb83079298bbf3c12216
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.0-rc3/k3k-1.0.0-rc3.tgz
version: 1.0.0-rc3
- apiVersion: v2
appVersion: v1.0.0-rc2
created: "2025-10-28T15:30:42.120914789Z"
description: A Helm chart for K3K
digest: 2383239f7dd671361ac63b41258d37dafc602d4ab0150699eb777d6706b483b3
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.0-rc2/k3k-1.0.0-rc2.tgz
version: 1.0.0-rc2
- apiVersion: v2
appVersion: v1.0.0-rc1
created: "2025-10-14T13:19:34.016218173Z"
description: A Helm chart for K3K
digest: 4facfe1cc00be65a79a885c4a2d3be4e62646c4df9fd35691f0851db8563ddb5
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-1.0.0-rc1/k3k-1.0.0-rc1.tgz
version: 1.0.0-rc1
- apiVersion: v2
appVersion: v0.3.5
created: "2025-09-30T12:27:28.916176598Z"
description: A Helm chart for K3K
digest: 01c7f514530504980f8ee28092d3d584ddb6beebf730e125a2a371126261b6ad
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.5/k3k-0.3.5.tgz
version: 0.3.5
- apiVersion: v2
appVersion: v0.3.5-rc1
created: "2025-09-17T09:16:58.061714814Z"
description: A Helm chart for K3K
digest: 672a6f9cb7d9c9a600d2e8c6f022221c7db061f13ec173ade36196fd87152aa8
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.5-rc1/k3k-0.3.5-rc1.tgz
version: 0.3.5-rc1
- apiVersion: v2
appVersion: v0.3.4
created: "2025-08-28T08:57:50.805906218Z"
description: A Helm chart for K3K
digest: bfac6ec18a5a25dfe1d1ad35b4c09be6e3c8c7739c5230655c1eba3f9f39585d
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.4/k3k-0.3.4.tgz
version: 0.3.4
- apiVersion: v2
appVersion: v0.3.4-rc3
created: "2025-08-25T17:03:08.195077205Z"
description: A Helm chart for K3K
digest: c7e8ba5c75c5c94dcf05f5667e9aca7dbfde1df68c72dd5139c15889f49a4dd3
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.4-rc3/k3k-0.3.4-rc3.tgz
version: 0.3.4-rc3
- apiVersion: v2
appVersion: v0.3.4-rc2
created: "2025-08-19T08:57:52.214719255Z"
description: A Helm chart for K3K
digest: e177c8e12a17d0c22084b42ea0b05983799501bb852192b41f3f3a357ff6542b
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.4-rc2/k3k-0.3.4-rc2.tgz
version: 0.3.4-rc2
- apiVersion: v2
appVersion: v0.3.4-rc1
created: "2025-07-24T15:13:44.735105812Z"
description: A Helm chart for K3K
digest: 8bf37262fb23265ab0afb2e1cfae17f80f211b3536f226bb43fc638586d65737
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.4-rc1/k3k-0.3.4-rc1.tgz
version: 0.3.4-rc1
- apiVersion: v2
appVersion: v0.3.3
created: "2025-06-30T08:44:59.953223554Z"
description: A Helm chart for K3K
digest: 679b917d6cffe7f649c3e11b8577e477376359bcaee08cf4160beb69de6cd03c
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3/k3k-0.3.3.tgz
version: 0.3.3
- apiVersion: v2
appVersion: v0.3.3-rc6
created: "2025-06-27T14:06:59.461066242Z"
description: A Helm chart for K3K
digest: d9c4163660f7814d98e468b4077ba98822c218a71c083c9b1c0742a1b0d63503
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3-r6/k3k-0.3.3-r6.tgz
version: 0.3.3-r6
- apiVersion: v2
appVersion: v0.3.3-rc5
created: "2025-06-25T08:51:00.220876148Z"
description: A Helm chart for K3K
digest: e2fb4b93ada759ec3b50c3f381de7bd17bd74aa48c64fe94310607662218ea88
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3-r5/k3k-0.3.3-r5.tgz
version: 0.3.3-r5
- apiVersion: v2
appVersion: v0.3.3-rc4
created: "2025-06-24T13:00:57.51443719Z"
description: A Helm chart for K3K
digest: a8f04ed83fb34c9e9daa7828a496d9260ff1686e2d0008735e0aabc158dff2b2
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3-r4/k3k-0.3.3-r4.tgz
version: 0.3.3-r4
- apiVersion: v2
appVersion: v0.3.3-rc3
created: "2025-06-20T16:20:26.393275671Z"
description: A Helm chart for K3K
digest: 0528181d151b13762b98f2dfd45d6357c034f7b89380944e2a177d8e62feaa10
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3-r3/k3k-0.3.3-r3.tgz
version: 0.3.3-r3
- apiVersion: v2
appVersion: v0.3.3-rc1
created: "2025-06-04T07:57:47.069248739Z"
description: A Helm chart for K3K
digest: 80643b92f3b35b9f71096e9231c73185086516831c80f498c5dda2e130ff9614
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.3-r1/k3k-0.3.3-r1.tgz
version: 0.3.3-r1
- apiVersion: v2
appVersion: v0.3.2
created: "2025-04-18T10:45:23.246160851Z"
description: A Helm chart for K3K
digest: 505d190ef24da6265ad2aaf3e262ba9b7c0709407caa3cca9d2844016976bf77
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.2/k3k-0.3.2.tgz
version: 0.3.2
- apiVersion: v2
appVersion: v0.3.1
created: "2025-03-21T01:30:36.632888085Z"
description: A Helm chart for K3K
digest: a610031362ff92f0b354bf5ae73ef58773a604c9e3864a36dbd491211c3d43b8
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.1-r2/k3k-0.3.1-r2.tgz
version: 0.3.1-r2
- apiVersion: v2
appVersion: v0.3.1-rc1
created: "2025-03-03T16:15:27.474796611Z"
description: A Helm chart for K3K
digest: 68ea6319dfecdcaa0da0fe17fb1dee6ee7919a31489d1ab0c31894c9ffa75bf4
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.1-r1/k3k-0.3.1-r1.tgz
version: 0.3.1-r1
- apiVersion: v2
appVersion: v0.3.0
created: "2025-02-17T13:13:16.005242178Z"
description: A Helm chart for K3K
digest: e543450b8960dc559823327381c0aef3d291785297367246c08607083ff08a77
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.3.0-r1/k3k-0.3.0-r1.tgz
version: 0.3.0-r1
- apiVersion: v2
appVersion: v0.2.2-rc5
created: "2025-02-14T13:36:51.20921457Z"
description: A Helm chart for K3K
digest: 4cfa0028c8e73c7cb6a02168c6547e49e1f895f51a7eb51e2b9dd60754798c68
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.6-r1/k3k-0.1.6-r1.tgz
version: 0.1.6-r1
- apiVersion: v2
appVersion: v0.2.2-rc4
created: "2025-01-23T19:04:26.116807778Z"
description: A Helm chart for K3K
digest: ac4b667ec3e9f7d7f1cc9500bc2b66659e642774eb74a459afca9771dcdcaf43
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.5-r1/k3k-0.1.5-r1.tgz
version: 0.1.5-r1
- apiVersion: v2
appVersion: 0.2.0
created: "2024-03-15T00:14:20.084301115Z"
description: A Helm chart for K3K
digest: 3e84624544426312d541cd9157075ce9eaa48a3fcbd51cb616696a33098f6cab
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.4-r1/k3k-0.1.4-r1.tgz
version: 0.1.4-r1
- apiVersion: v2
appVersion: 0.2.0
created: "2024-02-15T22:04:54.039214701Z"
description: A Helm chart for K3K
digest: 750470714dbe548ec72fb4b297f02fb14acc7debc1df681a71a57f3dc639ac74
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.3-r1/k3k-0.1.3-r1.tgz
version: 0.1.3-r1
- apiVersion: v2
appVersion: 0.1.1
created: "2024-01-06T05:45:05.385260037Z"
description: A Helm chart for K3K
digest: 387721f339d1ce28e77a77c2c56e4598ea8b8e9828cb52dd014313f45efac0d0
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.2-r1/k3k-0.1.2-r1.tgz
version: 0.1.2-r1
- apiVersion: v2
appVersion: 0.1.0
created: "2024-01-03T00:59:14.9735535Z"
description: A Helm chart for K3K
digest: 57b5d181809031e781bcea2deb32f8169c64fb52f312d2c2e34039b00700fbff
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.1-r1/k3k-0.1.1-r1.tgz
version: 0.1.1-r1
- apiVersion: v2
appVersion: 0.0.0-alpha7
created: "2023-07-03T21:37:09.595779207Z"
description: A Helm chart for K3K
digest: 06a53a68ce620e9bf736d02aba7e7db58c3e85795e99c9eb006997cd2f7889bb
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.0-r2/k3k-0.1.0-r2.tgz
version: 0.1.0-r2
- apiVersion: v2
appVersion: 0.0.0-alpha6
created: "2023-06-23T21:52:08.959064023Z"
description: A Helm chart for K3K
digest: 5e480db568f34f3be4686f93e0134a3fcbb0a6f9a566c02c745456102c35d880
name: k3k
type: application
urls:
- https://github.com/rancher/k3k/releases/download/chart-0.1.0-r1/k3k-0.1.0-r1.tgz
version: 0.1.0-r1
generated: "2026-01-29T08:56:00.014531538Z"

k3k-kubelet/README.md Normal file

@@ -0,0 +1,34 @@
## Virtual Kubelet
This package provides an implementation of a virtual cluster node using [virtual-kubelet](https://github.com/virtual-kubelet/virtual-kubelet).
The implementation is based on several projects, including:
- [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet)
- [Kubectl](https://github.com/kubernetes/kubectl)
- [Client-go](https://github.com/kubernetes/client-go)
- [Azure-Aci](https://github.com/virtual-kubelet/azure-aci)
## Overview
This project creates a node that registers itself in the virtual cluster. When workloads are scheduled to this node, it simply creates/updates the workload on the host cluster.
## Usage
Build/Push the image using (from the root of rancher/k3k):
```
make build
docker buildx build -f package/Dockerfile . -t $REPO/$IMAGE:$TAG
```
When running, it is recommended to deploy a k3k cluster with 1 server (with `--disable-agent` as a server arg) and no agents (so that the workloads can only be scheduled on the virtual node/host cluster).
After the image is built, it should be deployed with the following ENV vars set:
- `CLUSTER_NAME` should be the name of the cluster.
- `CLUSTER_NAMESPACE` should be the namespace the cluster is running in.
- `HOST_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the host cluster (likely stored in a secret/mounted as a volume).
- `VIRT_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the virtual cluster (likely stored in a secret/mounted as a volume).
- `VIRT_POD_IP` should be the IP that the container is accessible from.
This project is still under development and there are many features yet to be implemented, but it can run a basic nginx pod.
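
As a small illustration of the variables listed above, the hypothetical helper below (not part of this repo) simply collects them from the environment and fails early when one is missing:

```go
package main

import (
	"fmt"
	"os"
)

// kubeletEnv gathers the environment variables documented above.
func kubeletEnv() (map[string]string, error) {
	required := []string{
		"CLUSTER_NAME", "CLUSTER_NAMESPACE",
		"HOST_KUBECONFIG", "VIRT_KUBECONFIG", "VIRT_POD_IP",
	}
	env := map[string]string{}
	for _, key := range required {
		val, ok := os.LookupEnv(key)
		if !ok || val == "" {
			return nil, fmt.Errorf("missing required environment variable %s", key)
		}
		env[key] = val
	}
	return env, nil
}

func main() {
	env, err := kubeletEnv()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("starting virtual kubelet for cluster %s/%s\n",
		env["CLUSTER_NAMESPACE"], env["CLUSTER_NAME"])
}
```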

k3k-kubelet/config.go Normal file

@@ -0,0 +1,87 @@
package main
import (
"errors"
"os"
"gopkg.in/yaml.v2"
)
// config has all virtual-kubelet startup options
type config struct {
ClusterName string `yaml:"clusterName,omitempty"`
ClusterNamespace string `yaml:"clusterNamespace,omitempty"`
NodeName string `yaml:"nodeName,omitempty"`
Token string `yaml:"token,omitempty"`
AgentHostname string `yaml:"agentHostname,omitempty"`
HostConfigPath string `yaml:"hostConfigPath,omitempty"`
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
KubeletPort string `yaml:"kubeletPort,omitempty"`
ServerIP string `yaml:"serverIP,omitempty"`
Version string `yaml:"version,omitempty"`
}
func (c *config) unmarshalYAML(data []byte) error {
var conf config
if err := yaml.Unmarshal(data, &conf); err != nil {
return err
}
if c.ClusterName == "" {
c.ClusterName = conf.ClusterName
}
if c.ClusterNamespace == "" {
c.ClusterNamespace = conf.ClusterNamespace
}
if c.HostConfigPath == "" {
c.HostConfigPath = conf.HostConfigPath
}
if c.VirtualConfigPath == "" {
c.VirtualConfigPath = conf.VirtualConfigPath
}
if c.KubeletPort == "" {
c.KubeletPort = conf.KubeletPort
}
if c.AgentHostname == "" {
c.AgentHostname = conf.AgentHostname
}
if c.NodeName == "" {
c.NodeName = conf.NodeName
}
if c.Token == "" {
c.Token = conf.Token
}
if c.ServerIP == "" {
c.ServerIP = conf.ServerIP
}
if c.Version == "" {
c.Version = conf.Version
}
return nil
}
func (c *config) validate() error {
if c.ClusterName == "" {
return errors.New("cluster name is not provided")
}
if c.ClusterNamespace == "" {
return errors.New("cluster namespace is not provided")
}
if c.AgentHostname == "" {
return errors.New("agent Hostname is not provided")
}
return nil
}
func (c *config) parse(path string) error {
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
b, err := os.ReadFile(path)
if err != nil {
return err
}
return c.unmarshalYAML(b)
}
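
A short sketch of how a caller in the same package might combine the pieces above: fields that are already set (for example from flags) take precedence because `unmarshalYAML` only fills in empty ones, the file itself stays optional because `parse` skips a missing path, and `validate` is the final gate. The helper name and arguments are placeholders; the real k3k-kubelet entrypoint is not shown in this diff.

```go
// loadKubeletConfig is a hypothetical helper living in the same package.
func loadKubeletConfig(path, flagClusterName string) (config, error) {
	cfg := config{
		// Pre-populated fields win over the config file.
		ClusterName: flagClusterName,
	}

	// parse() is a no-op when the file does not exist, so the file is optional.
	if err := cfg.parse(path); err != nil {
		return config{}, err
	}

	if err := cfg.validate(); err != nil {
		return config{}, err
	}
	return cfg, nil
}
```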


@@ -0,0 +1,166 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type ConfigMapSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// CoreClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
// representation
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !c.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := c.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.ConfigMap
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
err = c.HostClient.Create(ctx, translated)
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels made on the host cluster by other applications
// but we do need to make sure the labels that the kubelet uses to track host cluster values
// are being tracked appropriately
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = c.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if c.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Insert(objKey)
c.mutex.Unlock()
_, err := c.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, no need to writelock/add it
if !c.isWatching(objKey) {
return nil
}
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return c.removeHostConfigMap(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove configmap: %w", err)
}
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Delete(objKey)
c.mutex.Unlock()
return nil
}
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
var vConfigMap corev1.ConfigMap
err := c.VirtualClient.Get(ctx, types.NamespacedName{Namespace: virtualNamespace, Name: virtualName}, &vConfigMap)
if err != nil {
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := c.TranslateFunc(&vConfigMap)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return c.HostClient.Delete(ctx, translated)
}
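
For context on how this syncer is meant to be used, here is a hedged wiring sketch: it assumes `virtMgr` and `hostMgr` are controller-runtime managers for the virtual and host clusters, and it substitutes a trivial rename for the real translation function, since neither is shown in this diff.

```go
package controller

import (
	"context"

	k3klog "github.com/rancher/k3k/pkg/log"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// setupConfigMapSync is a hypothetical helper showing the intended wiring.
func setupConfigMapSync(ctx context.Context, virtMgr, hostMgr manager.Manager, logger *k3klog.Logger) error {
	syncer := &ConfigMapSyncer{
		VirtualClient: virtMgr.GetClient(),
		HostClient:    hostMgr.GetClient(),
		Logger:        logger,
		TranslateFunc: func(cm *corev1.ConfigMap) (*corev1.ConfigMap, error) {
			host := cm.DeepCopy()
			host.Namespace = "k3k-example1-default" // assumed host namespace
			host.Name = "example1-" + cm.Name       // assumed naming scheme
			host.ResourceVersion = ""
			return host, nil
		},
	}

	// Reconcile ConfigMap events coming from the virtual cluster.
	if err := ctrl.NewControllerManagedBy(virtMgr).
		For(&corev1.ConfigMap{}).
		Complete(syncer); err != nil {
		return err
	}

	// Nothing is copied to the host until a resource is explicitly registered.
	return syncer.AddResource(ctx, "default", "app-settings")
}
```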


@@ -0,0 +1,119 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/k3k-kubelet/translate"
k3klog "github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type ControllerHandler struct {
sync.RWMutex
// Mgr is the manager used to run new controllers - from the virtual cluster
Mgr manager.Manager
// Scheme is the scheme used to run new controllers - from the virtual cluster
Scheme runtime.Scheme
// HostClient is the client used to communicate with the host cluster
HostClient client.Client
// VirtualClient is the client used to communicate with the virtual cluster
VirtualClient client.Client
// Translater is the translater that will be used to adjust objects before they
// are made on the host cluster
Translater translate.ToHostTranslater
// Logger is the logger that the controller will use to log errors
Logger *k3klog.Logger
// controllers are the controllers which are currently running
controllers map[schema.GroupVersionKind]updateableReconciler
}
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
// be altered through the Add and Remove methods
type updateableReconciler interface {
reconcile.Reconciler
AddResource(ctx context.Context, namespace string, name string) error
RemoveResource(ctx context.Context, namespace string, name string) error
}
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
c.RLock()
controllers := c.controllers
if controllers != nil {
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
c.RUnlock()
return err
}
}
// we need to manually lock/unlock since we intend to take a write lock to add a new controller
c.RUnlock()
var r updateableReconciler
switch obj.(type) {
case *v1.Secret:
r = &SecretSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
// note that this doesn't do any type safety - fix this
// when generics work
c.Translater.TranslateTo(s)
// Remove service-account-token types when synced to the host
if s.Type == v1.SecretTypeServiceAccountToken {
s.Type = v1.SecretTypeOpaque
}
return s, nil
},
Logger: c.Logger,
}
case *v1.ConfigMap:
r = &ConfigMapSyncer{
HostClient: c.HostClient,
VirtualClient: c.VirtualClient,
// TODO: Need actual function
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
c.Translater.TranslateTo(s)
return s, nil
},
Logger: c.Logger,
}
default:
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
// logic could be used for other types.
return fmt.Errorf("unrecognized type: %T", obj)
}
err := ctrl.NewControllerManagedBy(c.Mgr).
For(obj).
Complete(r)
if err != nil {
return fmt.Errorf("unable to start controller for %T: %w", obj, err)
}
c.Lock()
if c.controllers == nil {
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
}
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
c.Unlock()
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
}
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
// since we aren't adding a new controller, a read lock is sufficient
c.RLock()
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()
if !ok {
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
}
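For orientation, the following is a minimal sketch (not part of the diff) of how this handler could be driven from elsewhere in the kubelet. It assumes the manager, clients, translater and logger are built the same way kubelet.go builds them, and the helper name syncConfigMap is made up for illustration. Note that AddResource keys running controllers by the object's GroupVersionKind, so the GVK has to be populated on the object before it is passed in.

package example

import (
    "context"

    "github.com/rancher/k3k/k3k-kubelet/controller"
    "github.com/rancher/k3k/k3k-kubelet/translate"
    k3klog "github.com/rancher/k3k/pkg/log"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

// syncConfigMap is a hypothetical helper showing the AddResource flow.
func syncConfigMap(ctx context.Context, virtMgr manager.Manager, hostClient client.Client, translater translate.ToHostTranslater, logger *k3klog.Logger) error {
    handler := &controller.ControllerHandler{
        Mgr:           virtMgr,
        Scheme:        *virtMgr.GetScheme(),
        HostClient:    hostClient,
        VirtualClient: virtMgr.GetClient(),
        Translater:    translater,
        Logger:        logger,
    }

    cm := &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "app-config"},
    }
    // the handler looks controllers up by GVK, so set it explicitly on the object
    cm.GetObjectKind().SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap"))

    // starts (or reuses) a ConfigMap syncer and begins mirroring default/app-config to the host
    return handler.AddResource(ctx, cm)
}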


@@ -0,0 +1,121 @@
package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
pvcController = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
)
type PVCReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translater translate.ToHostTranslater
}
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
translater := translate.ToHostTranslater{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := PVCReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(pvcController),
Translater: translater,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
}
return ctrl.NewControllerManagedBy(virtMgr).
For(&v1.PersistentVolumeClaim{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
var (
virtPVC v1.PersistentVolumeClaim
hostPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
// handle persistent volume claim sync
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC on the host if it exists, ignoring NotFound so the finalizer can still be removed
if err := r.hostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced service
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create or update the pvc on host
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedPVC.Name, Namespace: r.clusterNamespace}, &hostPVC); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the persistent volume for the first time on the host cluster")
return reconcile.Result{}, r.hostClient.Create(ctx, syncedPVC)
}
return reconcile.Result{}, err
}
log.Info("updating pvc on the host cluster")
return reconcile.Result{}, r.hostClient.Update(ctx, syncedPVC)
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translater.TranslateTo(hostPVC)
return hostPVC
}


@@ -0,0 +1,170 @@
package controller
import (
"context"
"fmt"
"sync"
"github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type SecretSyncer struct {
mutex sync.RWMutex
// VirtualClient is the client for the virtual cluster
VirtualClient client.Client
// HostClient is the client for the host cluster
HostClient client.Client
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
// representation
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
// Logger is the logger that the controller will use
Logger *k3klog.Logger
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
// through add/remove
objs sets.Set[types.NamespacedName]
}
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
if !s.isWatching(req.NamespacedName) {
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := s.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.Secret
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
if err = s.HostClient.Create(ctx, translated); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we update the host object in place in order to avoid conflicts on update
host.Data = translated.Data
if host.Labels == nil {
host.Labels = make(map[string]string, len(translated.Labels))
}
// we don't want to override labels made on the host cluster by other applications
// but we do need to make sure the labels that the kubelet uses to track host cluster values
// are being tracked appropriately
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = s.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
// isWatching is a utility method to determine if a key is in objs without the caller needing
// to handle mutex lock/unlock.
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.objs.Has(key)
}
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
// same resource.
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if s.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Insert(objKey)
s.mutex.Unlock()
_, err := s.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
// removed resource.
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
objKey := types.NamespacedName{
Namespace: namespace,
Name: name,
}
// if we don't sync this object, there is nothing to remove
if !s.isWatching(objKey) {
return nil
}
// remove the synced secret from the host cluster (with retry) before dropping it from the watch list
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
return s.removeHostSecret(ctx, namespace, name)
}); err != nil {
return fmt.Errorf("unable to remove secret: %w", err)
}
s.mutex.Lock()
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Delete(objKey)
s.mutex.Unlock()
return nil
}
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
var vSecret corev1.Secret
err := s.VirtualClient.Get(ctx, types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}, &vSecret)
if err != nil {
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := s.TranslateFunc(&vSecret)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return s.HostClient.Delete(ctx, translated)
}


@@ -0,0 +1,126 @@
package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
serviceSyncerController = "service-syncer-controller"
maxConcurrentReconciles = 1
serviceFinalizerName = "service.k3k.io/finalizer"
)
type ServiceReconciler struct {
virtualClient ctrlruntimeclient.Client
hostClient ctrlruntimeclient.Client
clusterName string
clusterNamespace string
Scheme *runtime.Scheme
HostScheme *runtime.Scheme
logger *log.Logger
Translater translate.ToHostTranslater
}
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
translater := translate.ToHostTranslater{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
}
// initialize a new Reconciler
reconciler := ServiceReconciler{
virtualClient: virtMgr.GetClient(),
hostClient: hostMgr.GetClient(),
Scheme: virtMgr.GetScheme(),
HostScheme: hostMgr.GetScheme(),
logger: logger.Named(serviceSyncerController),
Translater: translater,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
}
return ctrl.NewControllerManagedBy(virtMgr).
For(&v1.Service{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
return reconcile.Result{}, nil
}
var (
virtService v1.Service
hostService v1.Service
cluster v1alpha1.Cluster
)
// getting the cluster for setting the controller reference
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := s.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtService.DeletionTimestamp.IsZero() {
// delete the synced service on the host if it exists, ignoring NotFound so the finalizer can still be removed
if err := s.hostClient.Delete(ctx, syncedService); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced service
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
// create or update the service on host
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
}
log.Info("updating service on the host cluster")
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
}
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
hostService := obj.DeepCopy()
s.Translater.TranslateTo(hostService)
// don't sync finalizers to the host
return hostService
}


@@ -0,0 +1,161 @@
package webhook
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
const (
webhookName = "podmutator.k3k.io"
webhookTimeout = int32(10)
webhookPort = "9443"
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
)
type webhookHandler struct {
client ctrlruntimeclient.Client
scheme *runtime.Scheme
nodeName string
clusterName string
clusterNamespace string
logger *log.Logger
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any downward API env vars that reference status.* field paths
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName string, logger *log.Logger) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
logger: logger,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
nodeName: nodeName,
}
// create mutator webhook configuration to the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
}
if err := handler.client.Create(ctx, config); err != nil {
return err
}
// register webhook with the manager
return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete()
}
func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error {
pod, ok := obj.(*v1.Pod)
if !ok {
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
if pod.Spec.NodeName == "" {
pod.Spec.NodeName = w.nodeName
}
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
for i, container := range pod.Spec.Containers {
for j, env := range container.Env {
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
continue
}
fieldPath := env.ValueFrom.FieldRef.FieldPath
if strings.Contains(fieldPath, "status.") {
annotationKey := fmt.Sprintf("%s_%d_%s", FieldpathField, i, env.Name)
pod.Annotations[annotationKey] = fieldPath
pod.Spec.Containers[i].Env = removeEnv(pod.Spec.Containers[i].Env, j)
}
}
}
return nil
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
var (
webhookTLSSecret v1.Secret
)
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
return nil, err
}
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
if !ok {
return nil, errors.New("webhook CABundle does not exist in secret")
}
webhookURL := "https://" + w.nodeName + ":" + webhookPort + webhookPath
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: "admissionregistration.k8s.io/v1",
Kind: "MutatingWebhookConfiguration",
},
ObjectMeta: metav1.ObjectMeta{
Name: webhookName + "-configuration",
},
Webhooks: []admissionregistrationv1.MutatingWebhook{
{
Name: webhookName,
AdmissionReviewVersions: []string{"v1"},
SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone),
TimeoutSeconds: ptr.To(webhookTimeout),
ClientConfig: admissionregistrationv1.WebhookClientConfig{
URL: ptr.To(webhookURL),
CABundle: caBundle,
},
Rules: []admissionregistrationv1.RuleWithOperations{
{
Operations: []admissionregistrationv1.OperationType{
"CREATE",
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
Scope: ptr.To(admissionregistrationv1.NamespacedScope),
},
},
},
},
},
}, nil
}
func removeEnv(envs []v1.EnvVar, i int) []v1.EnvVar {
envs[i] = envs[len(envs)-1]
return envs[:len(envs)-1]
}
func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
s := strings.SplitN(annotationKey, "_", 3)
if len(s) != 3 {
return -1, "", errors.New("fieldpath annotation is not set correctly")
}
containerIndex, err := strconv.Atoi(s[1])
if err != nil {
return -1, "", err
}
envName := s[2]
return containerIndex, envName, nil
}
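To make the annotation format concrete, here is a small round-trip sketch (not part of the diff) that builds a fieldpath annotation key the same way Default does and parses it back with ParseFieldPathAnnotationKey; the env name MY_POD_IP is just an example value. Because SplitN uses a limit of 3, env names that themselves contain underscores survive the round trip.

package main

import (
    "fmt"

    "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
)

func main() {
    // key layout used by the pod mutator: <FieldpathField>_<containerIndex>_<envName>
    key := fmt.Sprintf("%s_%d_%s", webhook.FieldpathField, 0, "MY_POD_IP")

    idx, envName, err := webhook.ParseFieldPathAnnotationKey(key)
    if err != nil {
        panic(err)
    }
    fmt.Println(idx, envName) // 0 MY_POD_IP
}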

k3k-kubelet/kubelet.go

@@ -0,0 +1,377 @@
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
baseScheme = runtime.NewScheme()
k3kKubeletName = "k3k-kubelet"
)
func init() {
_ = clientgoscheme.AddToScheme(baseScheme)
_ = v1alpha1.AddToScheme(baseScheme)
}
type kubelet struct {
virtualCluster v1alpha1.Cluster
name string
port int
hostConfig *rest.Config
virtConfig *rest.Config
agentIP string
dnsIP string
hostClient ctrlruntimeclient.Client
virtClient kubernetes.Interface
hostMgr manager.Manager
virtualMgr manager.Manager
node *nodeutil.Node
logger *k3klog.Logger
token string
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
if err != nil {
return nil, err
}
hostClient, err := ctrlruntimeclient.New(hostConfig, ctrlruntimeclient.Options{
Scheme: baseScheme,
})
if err != nil {
return nil, err
}
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
if err != nil {
return nil, err
}
virtClient, err := kubernetes.NewForConfig(virtConfig)
if err != nil {
return nil, err
}
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
Scheme: baseScheme,
Metrics: ctrlserver.Options{
BindAddress: ":8083",
},
Cache: cache.Options{
DefaultNamespaces: map[string]cache.Config{
c.ClusterNamespace: {},
},
},
})
if err != nil {
return nil, errors.New("unable to create controller-runtime mgr for host cluster: " + err.Error())
}
virtualScheme := runtime.NewScheme()
// virtual client will only use core types (for now), no need to add anything other than the basics
err = clientgoscheme.AddToScheme(virtualScheme)
if err != nil {
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
}
webhookServer := webhook.NewServer(webhook.Options{
CertDir: "/opt/rancher/k3k-webhook",
})
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
Scheme: virtualScheme,
WebhookServer: webhookServer,
Metrics: ctrlserver.Options{
BindAddress: ":8084",
},
})
if err != nil {
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.NodeName, logger); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
}
}
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
clusterIP, err := clusterIP(ctx, c.AgentHostname, c.ClusterNamespace, hostClient)
if err != nil {
return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())
}
// get the cluster's DNS IP to be injected to pods
var dnsService v1.Service
dnsName := controller.SafeConcatNameWithPrefix(c.ClusterName, "kube-dns")
if err := hostClient.Get(ctx, types.NamespacedName{Name: dnsName, Namespace: c.ClusterNamespace}, &dnsService); err != nil {
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
}
var virtualCluster v1alpha1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
}
return &kubelet{
virtualCluster: virtualCluster,
name: c.NodeName,
hostConfig: hostConfig,
hostClient: hostClient,
virtConfig: virtConfig,
virtClient: virtClient,
hostMgr: hostMgr,
virtualMgr: virtualMgr,
agentIP: clusterIP,
logger: logger.Named(k3kKubeletName),
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
}, nil
}
func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostClient ctrlruntimeclient.Client) (string, error) {
var service v1.Service
serviceKey := types.NamespacedName{Namespace: clusterNamespace, Name: serviceName}
if err := hostClient.Get(ctx, serviceKey, &service); err != nil {
return "", err
}
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
var err error
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
if err != nil {
return errors.New("unable to start kubelet: " + err.Error())
}
return nil
}
func (k *kubelet) start(ctx context.Context) {
// any one of the following 3 tasks (host manager, virtual manager, node) crashing will stop the
// program, and all 3 of them block on start, so we start them here in go-routines
go func() {
err := k.hostMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("host manager stopped", zap.Error(err))
}
}()
go func() {
err := k.virtualMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
}
}()
// run the node async so that we can wait for it to be ready in another call
go func() {
ctx = log.WithLogger(ctx, k.logger)
if err := k.node.Run(ctx); err != nil {
k.logger.Fatalw("node errored when running", zap.Error(err))
}
}()
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
}
k.logger.Info("node exited successfully")
}
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
if err != nil {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
return utilProvider, &provider.Node{}, nil
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
// set up the routes
mux := http.NewServeMux()
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
return errors.New("unable to attach routes: " + err.Error())
}
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
c.TLSConfig = tlsConfig
return nil
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
var cluster v1alpha1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Infow("decoded bootstrap", zap.Error(err))
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
adminCert, adminKey, err := certs.CreateClientCertKey(
controller.AdminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(365),
b.ClientCA.Content,
b.ClientCAKey.Content)
if err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return clientcmd.RESTConfigFromKubeConfig(kubeconfigData)
}
func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
return clientcmd.Write(*config)
}
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var (
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
ip := net.ParseIP(agentIP)
altNames := certutil.AltNames{
DNSNames: []string{hostname},
IPs: []net.IP{ip},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
if err != nil {
return nil, errors.New("unable to get cert and key: " + err.Error())
}
clientCert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, errors.New("unable to get key pair: " + err.Error())
}
// create rootCA CertPool
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
if err != nil {
return nil, errors.New("unable to create ca certs: " + err.Error())
}
if len(certs) < 1 {
return nil, errors.New("ca cert is not parsed correctly")
}
pool := x509.NewCertPool()
pool.AddCert(certs[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}

k3k-kubelet/main.go

@@ -0,0 +1,128 @@
package main
import (
"context"
"os"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
)
var (
configFile string
cfg config
logger *log.Logger
debug bool
)
func main() {
app := cli.NewApp()
app.Name = "k3k-kubelet"
app.Usage = "virtual kubelet implementation k3k"
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "cluster-name",
Usage: "Name of the k3k cluster",
Destination: &cfg.ClusterName,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "cluster-namespace",
Usage: "Namespace of the k3k cluster",
Destination: &cfg.ClusterNamespace,
EnvVars: []string{"CLUSTER_NAMESPACE"},
},
&cli.StringFlag{
Name: "cluster-token",
Usage: "K3S token of the k3k cluster",
Destination: &cfg.Token,
EnvVars: []string{"CLUSTER_TOKEN"},
},
&cli.StringFlag{
Name: "host-config-path",
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
Destination: &cfg.HostConfigPath,
EnvVars: []string{"HOST_KUBECONFIG"},
},
&cli.StringFlag{
Name: "virtual-config-path",
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
Destination: &cfg.VirtualConfigPath,
EnvVars: []string{"CLUSTER_NAME"},
},
&cli.StringFlag{
Name: "kubelet-port",
Usage: "kubelet API port number",
Destination: &cfg.KubeletPort,
EnvVars: []string{"SERVER_PORT"},
Value: "10250",
},
&cli.StringFlag{
Name: "agent-hostname",
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
Destination: &cfg.AgentHostname,
EnvVars: []string{"AGENT_HOSTNAME"},
},
&cli.StringFlag{
Name: "server-ip",
Usage: "Server IP used for registering the virtual kubelet to the cluster",
Destination: &cfg.ServerIP,
EnvVars: []string{"SERVER_IP"},
},
&cli.StringFlag{
Name: "version",
Usage: "Version of kubernetes server",
Destination: &cfg.Version,
EnvVars: []string{"VERSION"},
},
&cli.StringFlag{
Name: "config",
Usage: "Path to k3k-kubelet config file",
Destination: &configFile,
EnvVars: []string{"CONFIG_FILE"},
Value: "/etc/rancher/k3k/config.yaml",
},
&cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logging",
Destination: &debug,
EnvVars: []string{"DEBUG"},
},
}
app.Before = func(clx *cli.Context) error {
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
return nil
}
app.Action = run
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
func run(clx *cli.Context) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
}
if err := cfg.validate(); err != nil {
logger.Fatalw("failed to validate config", zap.Error(err))
}
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
}
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
logger.Fatalw("failed to register new node", zap.Error(err))
}
k.start(ctx)
return nil
}


@@ -0,0 +1,196 @@
/*
Copyright (c) Microsoft Corporation.
Licensed under the Apache 2.0 license.
See https://github.com/virtual-kubelet/azure-aci/tree/master/pkg/metrics/collectors
*/
package collectors
import (
"time"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
compbasemetrics "k8s.io/component-base/metrics"
)
// defining metrics
var (
nodeCPUUsageDesc = compbasemetrics.NewDesc("node_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the node in core-seconds",
nil,
nil,
compbasemetrics.ALPHA,
"")
nodeMemoryUsageDesc = compbasemetrics.NewDesc("node_memory_working_set_bytes",
"Current working set of the node in bytes",
nil,
nil,
compbasemetrics.ALPHA,
"")
containerCPUUsageDesc = compbasemetrics.NewDesc("container_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the container in core-seconds",
[]string{"container", "pod", "namespace"},
nil,
compbasemetrics.ALPHA,
"")
containerMemoryUsageDesc = compbasemetrics.NewDesc("container_memory_working_set_bytes",
"Current working set of the container in bytes",
[]string{"container", "pod", "namespace"},
nil,
compbasemetrics.ALPHA,
"")
podCPUUsageDesc = compbasemetrics.NewDesc("pod_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the pod in core-seconds",
[]string{"pod", "namespace"},
nil,
compbasemetrics.ALPHA,
"")
podMemoryUsageDesc = compbasemetrics.NewDesc("pod_memory_working_set_bytes",
"Current working set of the pod in bytes",
[]string{"pod", "namespace"},
nil,
compbasemetrics.ALPHA,
"")
resourceScrapeResultDesc = compbasemetrics.NewDesc("scrape_error",
"1 if there was an error while getting container metrics, 0 otherwise",
nil,
nil,
compbasemetrics.ALPHA,
"")
containerStartTimeDesc = compbasemetrics.NewDesc("container_start_time_seconds",
"Start time of the container since unix epoch in seconds",
[]string{"container", "pod", "namespace"},
nil,
compbasemetrics.ALPHA,
"")
)
// NewKubeletResourceMetricsCollector returns a metrics.StableCollector which exports resource metrics
func NewKubeletResourceMetricsCollector(podStats *stats.Summary) compbasemetrics.StableCollector {
return &resourceMetricsCollector{
providerPodStats: podStats,
}
}
type resourceMetricsCollector struct {
compbasemetrics.BaseStableCollector
providerPodStats *stats.Summary
}
// Check if resourceMetricsCollector implements necessary interface
var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}
// DescribeWithStability implements compbasemetrics.StableCollector
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
ch <- nodeCPUUsageDesc
ch <- nodeMemoryUsageDesc
ch <- containerStartTimeDesc
ch <- containerCPUUsageDesc
ch <- containerMemoryUsageDesc
ch <- podCPUUsageDesc
ch <- podMemoryUsageDesc
ch <- resourceScrapeResultDesc
}
// CollectWithStability implements compbasemetrics.StableCollector
// Since new containers are frequently created and removed, using the Gauge would
// leak metric collectors for containers or pods that no longer exist. Instead, implement a
// custom collector in a way that only collects metrics for active containers.
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) {
var errorCount float64
defer func() {
ch <- compbasemetrics.NewLazyConstMetric(resourceScrapeResultDesc, compbasemetrics.GaugeValue, errorCount)
}()
statsSummary := *rc.providerPodStats
rc.collectNodeCPUMetrics(ch, statsSummary.Node)
rc.collectNodeMemoryMetrics(ch, statsSummary.Node)
for _, pod := range statsSummary.Pods {
for _, container := range pod.Containers {
rc.collectContainerStartTime(ch, pod, container)
rc.collectContainerCPUMetrics(ch, pod, container)
rc.collectContainerMemoryMetrics(ch, pod, container)
}
rc.collectPodCPUMetrics(ch, pod)
rc.collectPodMemoryMetrics(ch, pod)
}
}
// implement collector methods and validate that correct data is used
func (rc *resourceMetricsCollector) collectNodeCPUMetrics(ch chan<- compbasemetrics.Metric, s stats.NodeStats) {
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
compbasemetrics.NewLazyConstMetric(nodeCPUUsageDesc, compbasemetrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second)))
}
func (rc *resourceMetricsCollector) collectNodeMemoryMetrics(ch chan<- compbasemetrics.Metric, s stats.NodeStats) {
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
compbasemetrics.NewLazyConstMetric(nodeMemoryUsageDesc, compbasemetrics.GaugeValue, float64(*s.Memory.WorkingSetBytes)))
}
func (rc *resourceMetricsCollector) collectContainerStartTime(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
if s.StartTime.Unix() <= 0 {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.StartTime.Time,
compbasemetrics.NewLazyConstMetric(containerStartTimeDesc, compbasemetrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
compbasemetrics.NewLazyConstMetric(containerCPUUsageDesc, compbasemetrics.CounterValue,
float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectContainerMemoryMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
compbasemetrics.NewLazyConstMetric(containerMemoryUsageDesc, compbasemetrics.GaugeValue,
float64(*s.Memory.WorkingSetBytes), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectPodCPUMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats) {
if pod.CPU == nil || pod.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(pod.CPU.Time.Time,
compbasemetrics.NewLazyConstMetric(podCPUUsageDesc, compbasemetrics.CounterValue,
float64(*pod.CPU.UsageCoreNanoSeconds)/float64(time.Second), pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectPodMemoryMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats) {
if pod.Memory == nil || pod.Memory.WorkingSetBytes == nil {
return
}
ch <- compbasemetrics.NewLazyMetricWithTimestamp(pod.Memory.Time.Time,
compbasemetrics.NewLazyConstMetric(podMemoryUsageDesc, compbasemetrics.GaugeValue,
float64(*pod.Memory.WorkingSetBytes), pod.PodRef.Name, pod.PodRef.Namespace))
}
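As a usage note, the collector is intended to be registered on a fresh registry for every scrape, which is what the provider's GetMetricsResource does further down. A minimal, self-contained sketch (the stats values are made up) of that registration flow:

package main

import (
    "fmt"

    "github.com/rancher/k3k/k3k-kubelet/provider/collectors"
    stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    compbasemetrics "k8s.io/component-base/metrics"
    "k8s.io/utils/ptr"
)

func main() {
    // made-up summary: one node with 2 core-seconds of CPU and 1Gi of working set memory
    summary := &stats.Summary{
        Node: stats.NodeStats{
            CPU:    &stats.CPUStats{Time: metav1.Now(), UsageCoreNanoSeconds: ptr.To(uint64(2000000000))},
            Memory: &stats.MemoryStats{Time: metav1.Now(), WorkingSetBytes: ptr.To(uint64(1 << 30))},
        },
    }

    // a fresh registry per scrape avoids leaking series for pods that no longer exist
    registry := compbasemetrics.NewKubeRegistry()
    registry.CustomMustRegister(collectors.NewKubeletResourceMetricsCollector(summary))

    families, err := registry.Gather()
    if err != nil {
        panic(err)
    }
    for _, mf := range families {
        fmt.Println(mf.GetName())
    }
}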


@@ -0,0 +1,167 @@
package provider
import (
"context"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: hostname,
},
{
Type: v1.NodeInternalIP,
Address: ip,
},
}
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
node.Status.NodeInfo.KubeProxyVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
}
}
}()
}
// nodeConditions returns the basic conditions which mark the node as ready
func nodeConditions() []v1.NodeCondition {
return []v1.NodeCondition{
{
Type: "Ready",
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletReady",
Message: "kubelet is ready.",
},
{
Type: "OutOfDisk",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
},
{
Type: "MemoryPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientMemory",
Message: "kubelet has sufficient memory available",
},
{
Type: "DiskPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasNoDiskPressure",
Message: "kubelet has no disk pressure",
},
{
Type: "NetworkUnavailable",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "RouteCreated",
Message: "RouteController created a route",
},
}
}
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
// If the nodeLabels are specified only the matching nodes will be considered.
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
ctx := context.Background()
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
if err != nil {
return err
}
var virtualNode corev1.Node
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
return err
}
virtualNode.Status.Capacity = capacity
virtualNode.Status.Allocatable = allocatable
return virtualClient.Status().Update(ctx, &virtualNode)
}
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
// If some node labels are specified only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
listOpts := metav1.ListOptions{}
if nodeLabels != nil {
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
}
nodeList, err := coreClient.Nodes().List(ctx, listOpts)
if err != nil {
return nil, nil, err
}
// sum all
virtualCapacityResources := corev1.ResourceList{}
virtualAvailableResources := corev1.ResourceList{}
for _, node := range nodeList.Items {
// check if the node is Ready; if it is not, skip it
nodeReady := false
for _, condition := range node.Status.Conditions {
if condition.Type != corev1.NodeReady {
continue
}
nodeReady = condition.Status == corev1.ConditionTrue
break
}
if !nodeReady {
continue
}
// add all the available metrics to the virtual node
for resourceName, resourceQuantity := range node.Status.Capacity {
virtualResource := virtualCapacityResources[resourceName]
(&virtualResource).Add(resourceQuantity)
virtualCapacityResources[resourceName] = virtualResource
}
for resourceName, resourceQuantity := range node.Status.Allocatable {
virtualResource := virtualAvailableResources[resourceName]
(&virtualResource).Add(resourceQuantity)
virtualAvailableResources[resourceName] = virtualResource
}
}
return virtualCapacityResources, virtualAvailableResources, nil
}
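The summing above leans on resource.Quantity arithmetic; a standalone sketch (not part of the diff, with made-up node capacities) of the same copy-Add-write-back pattern over two host nodes:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // capacities of two hypothetical host nodes
    nodes := []corev1.ResourceList{
        {corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("8Gi")},
        {corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("16Gi")},
    }

    // same pattern as getResourcesFromNodes: copy the quantity, Add, write it back
    total := corev1.ResourceList{}
    for _, capacity := range nodes {
        for name, qty := range capacity {
            sum := total[name]
            sum.Add(qty)
            total[name] = sum
        }
    }

    fmt.Println(total.Cpu().String(), total.Memory().String()) // 6 24Gi
}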


@@ -0,0 +1,22 @@
package provider
import (
"context"
corev1 "k8s.io/api/core/v1"
)
// Node implements the node.Provider interface from Virtual Kubelet
type Node struct {
notifyCallback func(*corev1.Node)
}
// Ping is called to check if the node is healthy - in the current format it always is
func (n *Node) Ping(context.Context) error {
return nil
}
// NotifyNodeStatus sets the callback function for a node being changed. As of now, no changes are made
func (n *Node) NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node)) {
n.notifyCallback = cb
}


@@ -0,0 +1,828 @@
package provider
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/rancher/k3k/k3k-kubelet/controller"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"errors"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/transport/spdy"
compbasemetrics "k8s.io/component-base/metrics"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
var _ nodeutil.Provider = (*Provider)(nil)
// Provider implements nodeutil.Provider from Virtual Kubelet.
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
type Provider struct {
Handler controller.ControllerHandler
Translater translate.ToHostTranslater
HostClient client.Client
VirtualClient client.Client
ClientConfig rest.Config
CoreClient cv1.CoreV1Interface
ClusterNamespace string
ClusterName string
serverIP string
dnsIP string
logger *k3klog.Logger
}
var (
ErrRetryTimeout = errors.New("provider timed out")
)
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
}
translater := translate.ToHostTranslater{
ClusterName: name,
ClusterNamespace: namespace,
}
p := Provider{
Handler: controller.ControllerHandler{
Mgr: virtualMgr,
Scheme: *virtualMgr.GetScheme(),
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
Translater: translater,
Logger: logger,
},
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
Translater: translater,
ClientConfig: hostConfig,
CoreClient: coreClient,
ClusterNamespace: namespace,
ClusterName: name,
logger: logger,
serverIP: serverIP,
dnsIP: dnsIP,
}
return &p, nil
}
// GetContainerLogs retrieves the logs of a container by name from the provider.
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
hostPodName := p.Translater.TranslateName(namespace, podName)
options := corev1.PodLogOptions{
Container: containerName,
Timestamps: opts.Timestamps,
Follow: opts.Follow,
Previous: opts.Previous,
}
if opts.Tail != 0 {
tailLines := int64(opts.Tail)
options.TailLines = &tailLines
}
if opts.LimitBytes != 0 {
limitBytes := int64(opts.LimitBytes)
options.LimitBytes = &limitBytes
}
if opts.SinceSeconds != 0 {
sinceSeconds := int64(opts.SinceSeconds)
options.SinceSeconds = &sinceSeconds
}
if !opts.SinceTime.IsZero() {
sinceTime := metav1.NewTime(opts.SinceTime)
options.SinceTime = &sinceTime
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
if err != nil {
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
}
return closer, err
}
// RunInContainer executes a command in a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error {
hostPodName := p.Translater.TranslateName(namespace, podName)
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("exec")
req.VersionedParams(&corev1.PodExecOptions{
Container: containerName,
Command: cmd,
TTY: attach.TTY(),
Stdin: attach.Stdin() != nil,
Stdout: attach.Stdout() != nil,
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
Stderr: attach.Stderr(),
Tty: attach.TTY(),
TerminalSizeQueue: &translatorSizeQueue{
resizeChan: attach.Resize(),
},
})
}
// AttachToContainer attaches to the executing process of a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, containerName string, attach api.AttachIO) error {
hostPodName := p.Translater.TranslateName(namespace, podName)
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("attach")
req.VersionedParams(&corev1.PodAttachOptions{
Container: containerName,
TTY: attach.TTY(),
Stdin: attach.Stdin() != nil,
Stdout: attach.Stdout() != nil,
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
Stderr: attach.Stderr(),
Tty: attach.TTY(),
TerminalSizeQueue: &translatorSizeQueue{
resizeChan: attach.Resize(),
},
})
}
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary, error) {
p.logger.Debug("GetStatsSummary")
nodeList := &v1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
}
// fetch the stats from all the nodes
var nodeStats statsv1alpha1.NodeStats
var allPodsStats []statsv1alpha1.PodStats
for _, n := range nodeList.Items {
res, err := p.CoreClient.RESTClient().
Get().
Resource("nodes").
Name(n.Name).
SubResource("proxy").
Suffix("stats/summary").
DoRaw(ctx)
if err != nil {
return nil, fmt.Errorf(
"unable to get stats of node '%s', from cluster %s in namespace %s: %w",
n.Name, p.ClusterName, p.ClusterNamespace, err,
)
}
stats := &statsv1alpha1.Summary{}
if err := json.Unmarshal(res, stats); err != nil {
return nil, err
}
// TODO: we should probably calculate somehow the node stats from the different nodes of the host
// or reflect different nodes from the virtual kubelet.
// For the moment let's just pick one random node stats.
nodeStats = stats.Node
allPodsStats = append(allPodsStats, stats.Pods...)
}
pods, err := p.GetPods(ctx)
if err != nil {
return nil, err
}
podsNameMap := make(map[string]*v1.Pod)
for _, pod := range pods {
hostPodName := p.Translater.TranslateName(pod.Namespace, pod.Name)
podsNameMap[hostPodName] = pod
}
filteredStats := &statsv1alpha1.Summary{
Node: nodeStats,
Pods: make([]statsv1alpha1.PodStats, 0),
}
for _, podStat := range allPodsStats {
// skip pods that are not in the cluster namespace
if podStat.PodRef.Namespace != p.ClusterNamespace {
continue
}
// rewrite the PodReference to match the data of the virtual cluster
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
podStat.PodRef = statsv1alpha1.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
}
filteredStats.Pods = append(filteredStats.Pods, podStat)
}
}
return filteredStats, nil
}
// GetMetricsResource gets the metrics for the node, including running pods
func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
statsSummary, err := p.GetStatsSummary(ctx)
if err != nil {
return nil, errors.Join(err, errors.New("error fetching MetricsResource"))
}
registry := compbasemetrics.NewKubeRegistry()
registry.CustomMustRegister(collectors.NewKubeletResourceMetricsCollector(statsSummary))
metricFamily, err := registry.Gather()
if err != nil {
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
}
return metricFamily, nil
}
// PortForward forwards a local port to a port on the pod
func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port int32, stream io.ReadWriteCloser) error {
hostPodName := p.Translater.TranslateName(namespace, pod)
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
if err != nil {
return err
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
portAsString := strconv.Itoa(int(port))
readyChannel := make(chan struct{})
stopChannel := make(chan struct{}, 1)
// Today this doesn't work properly. When the port forward is supposed to stop, the caller (this provider)
// should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
// so more work is needed to detect a close and handle that appropriately.
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
if err != nil {
return err
}
return fw.ForwardPorts()
}
// CreatePod executes createPod with retry
func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.createPod, pod)
}
// createPod takes a Kubernetes Pod and deploys it within the provider.
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod := pod.DeepCopy()
p.Translater.TranslateTo(tPod)
// get Cluster definition
clusterKey := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
}
// these values shouldn't be set on create
tPod.UID = ""
tPod.ResourceVersion = ""
// the pod was scheduled onto the virtual kubelet node, but leaving NodeName set would make it pending indefinitely on the host
tPod.Spec.NodeName = ""
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
// if the priorityClass for the virtual cluster is set then override the provided value
// Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
}
// fieldpath annotations
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
// host cluster version
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// sync the serviceaccount token to the host cluster
if err := p.transformTokens(ctx, pod, tPod); err != nil {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP)
p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name,
"Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env)
return p.HostClient.Create(ctx, tPod)
}
// withRetry retries passed function with interval and timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
const (
interval = 2 * time.Second
timeout = 10 * time.Second
)
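// f is retried every 2 seconds until it succeeds or the 10 second timeout elapses; the errors of all
// failed attempts are joined so the caller sees the full history alongside ErrRetryTimeout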
var allErrors error
// retryFn will retry until the operation succeeds, or the timeout occurs
retryFn := func(ctx context.Context) (bool, error) {
if lastErr := f(ctx, pod); lastErr != nil {
// log that the retry failed?
allErrors = errors.Join(allErrors, lastErr)
return false, nil
}
return true, nil
}
if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
return errors.Join(allErrors, ErrRetryTimeout)
}
return nil
}
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
// if one/more volumes couldn't be transformed
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
for _, volume := range volumes {
var optional bool
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
if volume.ConfigMap != nil {
if volume.ConfigMap.Optional != nil {
optional = *volume.ConfigMap.Optional
}
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
}
volume.ConfigMap.Name = p.Translater.TranslateName(podNamespace, volume.ConfigMap.Name)
} else if volume.Secret != nil {
if volume.Secret.Optional != nil {
optional = *volume.Secret.Optional
}
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
}
volume.Secret.SecretName = p.Translater.TranslateName(podNamespace, volume.Secret.SecretName)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
if source.ConfigMap.Optional != nil {
optional = *source.ConfigMap.Optional
}
configMapName := source.ConfigMap.Name
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
}
source.ConfigMap.Name = p.Translater.TranslateName(podNamespace, configMapName)
} else if source.Secret != nil {
if source.Secret.Optional != nil {
optional = *source.Secret.Optional
}
secretName := source.Secret.Name
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
}
}
}
} else if volume.PersistentVolumeClaim != nil {
volume.PersistentVolumeClaim.ClaimName = p.Translater.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
}
}
return nil
}
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
var configMap corev1.ConfigMap
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: configMapName,
}
err := p.VirtualClient.Get(ctx, nsName, &configMap)
if err != nil {
// check if it's an optional configmap
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
err = p.Handler.AddResource(ctx, &configMap)
if err != nil {
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
var secret corev1.Secret
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: secretName,
}
err := p.VirtualClient.Get(ctx, nsName, &secret)
if err != nil {
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
err = p.Handler.AddResource(ctx, &secret)
if err != nil {
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// UpdatePod executes updatePod with retry
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.updatePod, pod)
}
func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
p.logger.Debugw("got a request for update pod")
// Once scheduled, a Pod cannot update fields other than the images of its containers and init containers, plus a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
// Update Pod in the virtual cluster
var currentVirtualPod v1.Pod
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
currentVirtualPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentVirtualPod.Spec.Tolerations = pod.Spec.Tolerations
// in the virtual cluster we can also update the labels and annotations
currentVirtualPod.Annotations = pod.Annotations
currentVirtualPod.Labels = pod.Labels
if err := p.VirtualClient.Update(ctx, &currentVirtualPod); err != nil {
return fmt.Errorf("unable to update pod in the virtual cluster: %w", err)
}
// Update Pod in the host cluster
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translater.TranslateName(pod.Namespace, pod.Name),
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
}
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
// update ActiveDeadlineSeconds and Tolerations
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
if err := p.HostClient.Update(ctx, &currentHostPod); err != nil {
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
}
return nil
}
// updateContainerImages updates the images of the original containers with the images of the updated containers that have the same name
func updateContainerImages(original, updated []v1.Container) []v1.Container {
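// build a lookup of container name to image from the updated containers; only original containers whose
// names appear in the lookup get their image replaced, every other field is left untouched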
newImages := make(map[string]string)
for _, c := range updated {
newImages[c.Name] = c.Image
}
for i, c := range original {
if updatedImage, found := newImages[c.Name]; found {
original[i].Image = updatedImage
}
}
return original
}
// DeletePod executes deletePod with retry
func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.deletePod, pod)
}
// deletePod takes a Kubernetes Pod and deletes it from the provider. Once a pod is deleted, the provider is
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Infof("Got request to delete pod %s", pod.Name)
hostName := p.Translater.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
// note that we don't return an error here. The pod was successfully deleted, another process
// should clean this up without affecting the user
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %v, resources may be left", pod.Namespace, pod.Name, err)
}
p.logger.Infof("Deleted pod %s", pod.Name)
return nil
}
// pruneUnusedVolumes removes volumes used by the pod that aren't used by any other pods
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
// since this pod was removed, initially mark all of the secrets/configmaps it uses as eligible
// for pruning
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
var pods corev1.PodList
// only pods in the same namespace could be using secrets/configmaps that this pod is using
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
Namespace: pod.Namespace,
})
if err != nil {
return fmt.Errorf("unable to list pods: %w", err)
}
for _, vPod := range pods.Items {
if vPod.Name == pod.Name {
continue
}
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
pruneSecrets.Delete(secrets...)
pruneConfigMap.Delete(configMaps...)
}
for _, secretName := range pruneSecrets.UnsortedList() {
var secret corev1.Secret
err := p.VirtualClient.Get(ctx, types.NamespacedName{
Name: secretName,
Namespace: pod.Namespace,
}, &secret)
if err != nil {
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
err = p.Handler.RemoveResource(ctx, &secret)
if err != nil {
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
}
for _, configMapName := range pruneConfigMap.UnsortedList() {
var configMap corev1.ConfigMap
err := p.VirtualClient.Get(ctx, types.NamespacedName{
Name: configMapName,
Namespace: pod.Namespace,
}, &configMap)
if err != nil {
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
}
return nil
}
// GetPod retrieves a pod by name from the provider (can be cached).
// The Pod returned is expected to be immutable, and may be accessed
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translater.TranslateName(namespace, name),
}
var pod corev1.Pod
err := p.HostClient.Get(ctx, hostNamespaceName, &pod)
if err != nil {
return nil, fmt.Errorf("error when retrieving pod: %w", err)
}
p.Translater.TranslateFrom(&pod)
return &pod, nil
}
// GetPodStatus retrieves the status of a pod by name from the provider.
// The PodStatus returned is expected to be immutable, and may be accessed
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
}
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
return pod.Status.DeepCopy(), nil
}
// GetPods retrieves a list of all pods running on the provider (can be cached).
// The Pods returned are expected to be immutable, and may be accessed
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
selector := labels.NewSelector()
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
if err != nil {
return nil, fmt.Errorf("unable to create label selector: %w", err)
}
selector = selector.Add(*requirement)
var podList corev1.PodList
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("unable to list pods: %w", err)
}
retPods := []*corev1.Pod{}
for _, pod := range podList.DeepCopy().Items {
p.Translater.TranslateFrom(&pod)
retPods = append(retPods, &pod)
}
return retPods, nil
}
// configureNetworking will inject network information into each pod to connect it to the
// virtual cluster api server, as well as configure DNS information to connect it to the
// synced coredns on the host cluster.
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) {
// inject serverIP into the pod's hostAliases
KubernetesHostAlias := corev1.HostAlias{
IP: serverIP,
Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"},
}
pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias)
// inject networking information to the pod's environment variables
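// these mirror the KUBERNETES_* service environment variables that the kubelet would normally inject
// for the default kubernetes Service, but point at the virtual cluster API server on port 6443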
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env,
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP_ADDR",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_HOST",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_PORT",
Value: "6443",
},
)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env,
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP_ADDR",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_HOST",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_PORT",
Value: "6443",
},
)
}
// inject the cluster DNS IP into all pods except the coredns pod
if !strings.HasPrefix(podName, "coredns") {
pod.Spec.DNSPolicy = corev1.DNSNone
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
Nameservers: []string{
p.dnsIP,
},
Searches: []string{
podNamespace + ".svc.cluster.local", "svc.cluster.local", "cluster.local",
},
}
}
}
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for determining which virtual cluster resources need to exist in (or be removed from) the host cluster.
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
var secrets []string
var configMaps []string
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil {
secrets = append(secrets, volume.Secret.SecretName)
} else if volume.ConfigMap != nil {
configMaps = append(configMaps, volume.ConfigMap.Name)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
configMaps = append(configMaps, source.ConfigMap.Name)
} else if source.Secret != nil {
secrets = append(secrets, source.Secret.Name)
}
}
}
}
return secrets, configMaps
}
// configureFieldPathEnv retrieves the annotations created by the pod mutator webhook
// and re-adds the corresponding fieldPath env vars to the pod
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
for name, value := range pod.Annotations {
if strings.Contains(name, webhook.FieldpathField) {
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
if err != nil {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
Name: envName,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: value,
},
},
})
// removing the annotation from the pod
delete(tPod.Annotations, name)
}
}
return nil
}


@@ -0,0 +1,150 @@
package provider
import (
"context"
"fmt"
"strings"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
)
const (
kubeAPIAccessPrefix = "kube-api-access"
serviceAccountTokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
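// serviceAccountTokenMountPath matches the default path where service account credentials are mounted into containers, so client libraries keep finding the token where they expect it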
)
// transformTokens copies the serviceaccount token used by the pod's serviceaccount to a secret on the host cluster and mounts it
// so that it looks like the original serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access volume is already removed from the pod
// this is needed in case users already add their own custom tokens, as in Rancher imported clusters
if !isKubeAccessVolumeFound(pod) {
return nil
}
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
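// creating a secret of type kubernetes.io/service-account-token lets the virtual cluster's token controller
// populate it with the token, ca.crt and namespace keys for the referenced service account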
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// extract the token data from the secret we just created
virtualSecretKey := types.NamespacedName{
Name: virtualSecret.Name,
Namespace: virtualSecret.Namespace,
}
if err := p.VirtualClient.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
return err
}
// To avoid race conditions we need to check if the secret's data has been populated
// including the token, ca.crt and namespace
if len(virtualSecret.Data) < 3 {
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
}
hostSecret := virtualSecret.DeepCopy()
hostSecret.Type = ""
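// with the type cleared the copy becomes a plain Opaque secret on the host, presumably so the host's
// token controller does not treat it as a token bound to a (non-existent) host service account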
hostSecret.Annotations = make(map[string]string)
p.Translater.TranslateTo(hostSecret)
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
p.translateToken(tPod, hostSecret.Name)
return nil
}
func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
corev1.ServiceAccountNameKey: serviceAccountName,
},
},
Type: corev1.SecretTypeServiceAccountToken,
}
}
// translateToken will remove the serviceaccount from the pod and replace the kube-api-access volume
// with a custom token volume, mounting it into all containers within the pod
func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) {
pod.Spec.ServiceAccountName = ""
pod.Spec.DeprecatedServiceAccount = ""
pod.Spec.AutomountServiceAccountToken = ptr.To(false)
removeKubeAccessVolume(pod)
addKubeAccessVolume(pod, hostSecretName)
}
func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
return true
}
}
return false
}
func removeKubeAccessVolume(pod *corev1.Pod) {
for i, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
}
}
// init containers
for i, container := range pod.Spec.InitContainers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
}
}
}
for i, container := range pod.Spec.Containers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...)
}
}
}
}
func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: tokenVolumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: hostSecretName,
},
},
})
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{
Name: tokenVolumeName,
MountPath: serviceAccountTokenMountPath,
})
}
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{
Name: tokenVolumeName,
MountPath: serviceAccountTokenMountPath,
})
}
}


@@ -0,0 +1,25 @@
package provider
import (
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"k8s.io/client-go/tools/remotecommand"
)
// translatorSizeQueue feeds the size events from the WebSocket
// resizeChan into the SPDY client input. Implements TerminalSizeQueue
// interface.
type translatorSizeQueue struct {
resizeChan <-chan api.TermSize
}
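// Next blocks until a resize event arrives on resizeChan; returning nil after the channel closes tells
// the remotecommand stream that no further terminal size updates will follow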
func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
size, ok := <-t.resizeChan
if !ok {
return nil
}
newSize := remotecommand.TerminalSize{
Width: size.Width,
Height: size.Height,
}
return &newSize
}


@@ -0,0 +1,106 @@
package translate
import (
"encoding/hex"
"fmt"
"github.com/rancher/k3k/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// ClusterNameLabel is the key for the label that contains the name of the virtual cluster
// this resource was made in
ClusterNameLabel = "k3k.io/clusterName"
// ResourceNameAnnotation is the key for the annotation that contains the original name of this
// resource in the virtual cluster
ResourceNameAnnotation = "k3k.io/name"
// ResourceNamespaceAnnotation is the key for the annotation that contains the original namespace of this
// resource in the virtual cluster
ResourceNamespaceAnnotation = "k3k.io/namespace"
)
type ToHostTranslater struct {
// ClusterName is the name of the virtual cluster whose resources we are
// translating to a host cluster
ClusterName string
// ClusterNamespace is the namespace of the virtual cluster whose resources
// we are translating to a host cluster
ClusterNamespace string
}
// TranslateTo translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
// may need to fetch the existing value and do some combination before using this.
func (t *ToHostTranslater) TranslateTo(obj client.Object) {
// owning objects may be in the virtual cluster, but may not be in the host cluster
obj.SetOwnerReferences(nil)
// add some annotations to make it easier to track source object
annotations := obj.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[ResourceNameAnnotation] = obj.GetName()
annotations[ResourceNamespaceAnnotation] = obj.GetNamespace()
obj.SetAnnotations(annotations)
// add a label to quickly identify objects owned by a given virtual cluster
labels := obj.GetLabels()
if labels == nil {
labels = map[string]string{}
}
labels[ClusterNameLabel] = t.ClusterName
obj.SetLabels(labels)
// resource version/UID won't match what's in the host cluster.
obj.SetResourceVersion("")
obj.SetUID("")
// set the name and the namespace so that this goes in the proper host namespace
// and doesn't collide with other resources
obj.SetName(t.TranslateName(obj.GetNamespace(), obj.GetName()))
obj.SetNamespace(t.ClusterNamespace)
obj.SetFinalizers(nil)
}
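// TranslateFrom translates a host cluster object back to its virtual cluster representation, undoing the
// renaming and annotation/label bookkeeping done by TranslateTo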
func (t *ToHostTranslater) TranslateFrom(obj client.Object) {
// owning objects may be in the virtual cluster, but may not be in the host cluster
obj.SetOwnerReferences(nil)
// remove the annotations added to track original name
annotations := obj.GetAnnotations()
// TODO: It's possible that this was erased by a change on the host cluster
// In this case, we need to have some sort of fallback or error return
name := annotations[ResourceNameAnnotation]
namespace := annotations[ResourceNamespaceAnnotation]
obj.SetName(name)
obj.SetNamespace(namespace)
delete(annotations, ResourceNameAnnotation)
delete(annotations, ResourceNamespaceAnnotation)
obj.SetAnnotations(annotations)
// remove the clusterName tracking label
labels := obj.GetLabels()
delete(labels, ClusterNameLabel)
obj.SetLabels(labels)
// resource version/UID won't match what's in the virtual cluster.
obj.SetResourceVersion("")
obj.SetUID("")
}
// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
func (t *ToHostTranslater) TranslateName(namespace string, name string) string {
// we need to come up with a name which is:
// - somewhat connectable to the original resource
// - a valid k8s name
// - idempotently computable
// - unique for this combination of name/namespace/cluster
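// e.g. name "web" in namespace "default" of cluster "mycluster" yields the prefix "web-default-mycluster"
// plus a hex suffix encoding "web+default+mycluster"; SafeConcatName is assumed to join (and, if needed,
// shorten) the two into a valid object name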
namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName)
// use + as a separator since it can't be in an object name
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
// the key is not a valid k8s object name (because of the '+' separators), so hex-encode it to get a valid, unique suffix
nameSuffix := hex.EncodeToString([]byte(nameKey))
return controller.SafeConcatName(namePrefix, nameSuffix)
}

main.go

@@ -3,50 +3,148 @@ package main
import (
"context"
"flag"
"errors"
"fmt"
"os"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var Scheme = runtime.NewScheme()
var (
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
debug bool
logger *log.Logger
flags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &kubeconfig,
},
&cli.StringFlag{
Name: "cluster-cidr",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
Destination: &clusterCIDR,
},
&cli.StringFlag{
Name: "shared-agent-image",
EnvVars: []string{"SHARED_AGENT_IMAGE"},
Usage: "K3K Virtual Kubelet image",
Value: "rancher/k3k:k3k-kubelet-dev",
Destination: &sharedAgentImage,
},
&cli.StringFlag{
Name: "shared-agent-pull-policy",
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
Destination: &sharedAgentImagePullPolicy,
},
&cli.BoolFlag{
Name: "debug",
EnvVars: []string{"DEBUG"},
Usage: "Debug level logging",
Destination: &debug,
},
}
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
}
func main() {
ctrlconfig.RegisterFlags(nil)
flag.Parse()
app := cmds.NewApp()
app.Flags = flags
app.Action = run
app.Version = buildinfo.Version
app.Before = func(clx *cli.Context) error {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
}
if err := app.Run(os.Args); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
}
}
func run(clx *cli.Context) error {
ctx := context.Background()
kubeconfig := flag.Lookup("kubeconfig").Value.String()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
klog.Fatalf("Failed to create config from kubeconfig file: %v", err)
return fmt.Errorf("failed to create config from kubeconfig file: %v", err)
}
mgr, err := ctrl.NewManager(restConfig, manager.Options{
Scheme: Scheme,
Scheme: scheme,
})
if err != nil {
klog.Fatalf("Failed to create new controller runtime manager: %v", err)
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
if err := cluster.Add(ctx, mgr); err != nil {
klog.Fatalf("Failed to add the new controller: %v", err)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, logger); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
if err := cluster.AddPodController(ctx, mgr, logger); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding clusterset controller")
if err := clusterset.Add(ctx, mgr, clusterCIDR, logger); err != nil {
return fmt.Errorf("failed to add the clusterset controller: %v", err)
}
if clusterCIDR == "" {
logger.Info("adding networkpolicy node controller")
if err := clusterset.AddNodeController(ctx, mgr, logger); err != nil {
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
}
}
if err := mgr.Start(ctx); err != nil {
klog.Fatalf("Failed to start the manager: %v", err)
return fmt.Errorf("failed to start the manager: %v", err)
}
return nil
}
func validate() error {
if sharedAgentImagePullPolicy != "" {
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
sharedAgentImagePullPolicy != string(v1.PullNever) {
return errors.New("invalid value for shared agent image policy")
}
}
return nil
}


@@ -15,14 +15,32 @@ LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-s390x
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-arm64
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-amd64
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-aarch64
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi
# build k3k-kubelet
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet ./k3k-kubelet
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-s390x
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-arm64
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-freebsd
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-amd64
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-aarch64
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-windows
fi
# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-s390x ./cli
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-arm64 ./cli
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-freebsd ./cli
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-amd64 ./cli
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-aarch64 ./cli
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi

ops/build-crds Executable file

@@ -0,0 +1,8 @@
#! /bin/sh
cd $(dirname $0)/../
# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for structs that use floats
controller-gen crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false paths=./pkg/apis/... output:crd:dir=./charts/k3k/crds

ops/checksum Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -ex
cd $(dirname $0)/..
CHECKSUM_DIR=${CHECKSUM_DIR:-./bin}
sumfile="${CHECKSUM_DIR}/sha256sum.txt"
echo -n "" > "${sumfile}"
files=$(ls ${CHECKSUM_DIR} | grep -v "sha256sum.txt")
for file in ${files}; do
sha256sum "${CHECKSUM_DIR}/${file}" | sed "s;$(dirname ${CHECKSUM_DIR}/${file})/;;g" >> "${sumfile}"
done
cat "${sumfile}"

ops/ci

@@ -4,6 +4,7 @@ set -e
cd $(dirname $0)
./build
./checksum
./test
./validate
./validate-ci


@@ -12,21 +12,20 @@ if [ $(git tag -l "$version") ]; then
exit 1
fi
# release the chart with artifacts
cr upload --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--git-repo k3k \
--skip-existing \
-o rancher
# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--index-path index.yaml \
--git-repo k3k \
-o rancher \
--push
-o rancher
# push to gh-pages
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
git config --global user.name "galal-hussein"
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/
# push index.yaml to gh-pages
git add index.yaml
git commit -m "add chart-${CHART_TAG} to index.yaml"
git push --force --set-upstream origin HEAD:gh-pages


@@ -8,6 +8,7 @@ cd $(dirname $0)/..
mkdir -p dist/artifacts
cp bin/k3k dist/artifacts/k3k${SUFFIX}
cp bin/k3kcli dist/artifacts/k3kcli${SUFFIX}
cp bin/k3k-kubelet dist/artifacts/k3k-kubelet${SUFFIX}
IMAGE=${REPO}/k3k:${TAG}
DOCKERFILE=package/Dockerfile
@@ -17,3 +18,13 @@ fi
docker build -f ${DOCKERFILE} -t ${IMAGE} .
echo Built ${IMAGE}
# todo: This might need to go to its own repo
IMAGE=${REPO}/k3k:${TAG}-kubelet
DOCKERFILE=package/Dockerfile.kubelet
if [ -e ${DOCKERFILE}.${ARCH} ]; then
DOCKERFILE=${DOCKERFILE}.${ARCH}
fi
docker build -f ${DOCKERFILE} -t ${IMAGE} .
echo Built ${IMAGE}


@@ -3,5 +3,7 @@ set -e
cd $(dirname $0)/..
echo Running tests
go test -cover -tags=test ./...
if [ -z ${SKIP_TESTS} ]; then
echo Running tests
go test -cover -tags=test ./...
fi


@@ -5,7 +5,7 @@ if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
fi
COMMIT=$(git rev-parse --short HEAD)
GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)}
GIT_TAG=${TAG:-$(git tag -l --contains HEAD | head -n 1)}
if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
VERSION=$GIT_TAG
@@ -19,9 +19,15 @@ fi
SUFFIX="-${ARCH}"
TAG=${TAG:-${VERSION}${SUFFIX}}
if [[ $VERSION = "chart*" ]]; then
TAG=${TAG:-${VERSION}}
else
TAG=${TAG:-${VERSION}${SUFFIX}}
fi
REPO=${REPO:-rancher}
if echo $TAG | grep -q dirty; then
if echo $TAG | grep dirty; then
TAG=dev
fi


@@ -1,4 +1,9 @@
FROM alpine
COPY bin/k3k /usr/bin/
COPY bin/k3kcli /usr/bin/
ARG BIN_K3K=bin/k3k
ARG BIN_K3KCLI=bin/k3kcli
COPY ${BIN_K3K} /usr/bin/
COPY ${BIN_K3KCLI} /usr/bin/
CMD ["k3k"]


@@ -0,0 +1,8 @@
# TODO: switch this to BCI-micro or scratch. Left as bci-base right now so that debugging is a bit easier
FROM registry.suse.com/bci/bci-base:15.6
ARG BIN_K3K_KUBELET=bin/k3k-kubelet
COPY ${BIN_K3K_KUBELET} /usr/bin/
ENTRYPOINT ["/usr/bin/k3k-kubelet"]


@@ -21,7 +21,10 @@ func Resource(resource string) schema.GroupResource {
func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{})
&ClusterList{},
&ClusterSet{},
&ClusterSetList{},
)
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}


@@ -0,0 +1,86 @@
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
type ClusterSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// +kubebuilder:default={}
//
// Spec is the spec of the ClusterSet
Spec ClusterSetSpec `json:"spec"`
// Status is the status of the ClusterSet
Status ClusterSetStatus `json:"status,omitempty"`
}
type ClusterSetSpec struct {
// MaxLimits are the limits that apply to all clusters (server + agent) in the set
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
// DefaultLimits are the limits used for servers/agents when a cluster in the set doesn't provide any
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
// DefaultNodeSelector is the node selector that applies to all clusters (server + agent) in the set
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
// DefaultPriorityClass is the priorityClassName applied to all pods of all clusters in the set
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
// DisableNetworkPolicy is an option that will disable the creation of a default networkpolicy for cluster isolation
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
// +kubebuilder:default={shared}
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +kubebuilder:validation:MinItems=1
//
// AllowedNodeTypes are the allowed cluster provisioning modes. Defaults to [shared].
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
}
// +kubebuilder:validation:Enum=privileged;baseline;restricted
//
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
type PodSecurityAdmissionLevel string
const (
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
)
type ClusterSetStatus struct {
// ObservedGeneration was the generation at the time the status was updated.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// LastUpdate is the timestamp when the status was last updated
LastUpdate string `json:"lastUpdateTime,omitempty"`
// Summary is a summary of the status
Summary string `json:"summary,omitempty"`
// Conditions are the individual conditions for the cluster set
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterSetList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []ClusterSet `json:"items"`
}


@@ -1,35 +1,127 @@
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
type Cluster struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// +kubebuilder:default={}
// +optional
Spec ClusterSpec `json:"spec"`
Status ClusterStatus `json:"status"`
Status ClusterStatus `json:"status,omitempty"`
}
type ClusterSpec struct {
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
// Version is a string representing the Kubernetes version to be used by the virtual nodes.
//
// +optional
Version string `json:"version"`
// Servers is the number of K3s pods to run in server (controlplane) mode.
//
// +kubebuilder:default=1
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
// +optional
Servers *int32 `json:"servers"`
// Agents is the number of K3s pods to run in agent (worker) mode.
//
// +kubebuilder:default=0
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
// +optional
Agents *int32 `json:"agents"`
// NodeSelector is the node selector that will be applied to all server/agent pods.
// In "shared" mode the node selector will be applied also to the workloads.
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// PriorityClass is the priorityClassName that will be applied to all server/agent pods.
// In "shared" mode the priorityClassName will be applied also to the workloads.
PriorityClass string `json:"priorityClass,omitempty"`
// Limit is the limits that apply for the server/worker nodes.
Limit *ClusterLimit `json:"clusterLimit,omitempty"`
// TokenSecretRef is a Secret reference used as the token to join server and worker nodes to the cluster. The controller
// assumes that the secret has a field "token" in its data; any other fields in the secret will be ignored.
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
// ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16.
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
ClusterCIDR string `json:"clusterCIDR,omitempty"`
// ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16.
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
ServiceCIDR string `json:"serviceCIDR,omitempty"`
// ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
// Defaults to 10.43.0.10.
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
ClusterDNS string `json:"clusterDNS,omitempty"`
// ServerArgs are the ordered key value pairs (e.g. "testArg", "testValue") for the K3s pods running in server mode.
ServerArgs []string `json:"serverArgs,omitempty"`
// AgentArgs are the ordered key value pairs (e.g. "testArg", "testValue") for the K3s pods running in agent mode.
AgentArgs []string `json:"agentArgs,omitempty"`
// TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use.
TLSSANs []string `json:"tlsSANs,omitempty"`
// Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup.
Addons []Addon `json:"addons,omitempty"`
// Mode is the cluster provisioning mode, which can be either "shared" or "virtual". Defaults to "shared".
//
// +kubebuilder:default="shared"
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +optional
Mode ClusterMode `json:"mode,omitempty"`
// Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
// persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
Persistence *PersistenceConfig `json:"persistence,omitempty"`
Expose *ExposeConfig `json:"expose,omitempty"`
// Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
// clusterIP which is relatively secure, but difficult to access outside of the cluster.
// +optional
Expose *ExposeConfig `json:"expose,omitempty"`
}
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:default="shared"
//
// ClusterMode is the possible provisioning mode of a Cluster.
type ClusterMode string
const (
SharedClusterMode = ClusterMode("shared")
VirtualClusterMode = ClusterMode("virtual")
)
type ClusterLimit struct {
// ServerLimit is the limits (cpu/mem) that apply to the server nodes
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
// WorkerLimit is the limits (cpu/mem) that apply to the agent nodes
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}
type Addon struct {
SecretNamespace string `json:"secretNamespace,omitempty"`
SecretRef string `json:"secretRef,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -43,20 +135,24 @@ type ClusterList struct {
type PersistenceConfig struct {
// Type can be ephemeral, static, dynamic
// +kubebuilder:default="ephemeral"
Type string `json:"type"`
StorageClassName string `json:"storageClassName,omitempty"`
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
type ExposeConfig struct {
Ingress *IngressConfig `json:"ingress"`
LoadBalancer *LoadBalancerConfig `json:"loadbalancer"`
NodePort *NodePortConfig `json:"nodePort"`
// +optional
Ingress *IngressConfig `json:"ingress,omitempty"`
// +optional
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
// +optional
NodePort *NodePortConfig `json:"nodePort,omitempty"`
}
type IngressConfig struct {
Enabled bool `json:"enabled"`
IngressClassName string `json:"ingressClassName"`
Enabled bool `json:"enabled,omitempty"`
IngressClassName string `json:"ingressClassName,omitempty"`
}
type LoadBalancerConfig struct {
@@ -68,40 +164,10 @@ type NodePortConfig struct {
}
type ClusterStatus struct {
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
}
type Allocation struct {
ClusterName string `json:"clusterName"`
Issued int64 `json:"issued"`
IPNet string `json:"ipNet"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPool struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Spec CIDRAllocationPoolSpec `json:"spec"`
Status CIDRAllocationPoolStatus `json:"status"`
}
type CIDRAllocationPoolSpec struct {
DefaultClusterCIDR string `json:"defaultClusterCIDR"`
}
type CIDRAllocationPoolStatus struct {
Pool []Allocation `json:"pool"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPoolList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []CIDRAllocationPool `json:"items"`
HostVersion string `json:"hostVersion,omitempty"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Persistence *PersistenceConfig `json:"persistence,omitempty"`
}


@@ -6,119 +6,23 @@
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Allocation) DeepCopyInto(out *Allocation) {
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Allocation.
func (in *Allocation) DeepCopy() *Allocation {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
func (in *Addon) DeepCopy() *Addon {
if in == nil {
return nil
}
out := new(Allocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPool) DeepCopyInto(out *CIDRAllocationPool) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPool.
func (in *CIDRAllocationPool) DeepCopy() *CIDRAllocationPool {
if in == nil {
return nil
}
out := new(CIDRAllocationPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPool) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolList) DeepCopyInto(out *CIDRAllocationPoolList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CIDRAllocationPool, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolList.
func (in *CIDRAllocationPoolList) DeepCopy() *CIDRAllocationPoolList {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPoolList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolSpec) DeepCopyInto(out *CIDRAllocationPoolSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolSpec.
func (in *CIDRAllocationPoolSpec) DeepCopy() *CIDRAllocationPoolSpec {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolStatus) DeepCopyInto(out *CIDRAllocationPoolStatus) {
*out = *in
if in.Pool != nil {
in, out := &in.Pool, &out.Pool
*out = make([]Allocation, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolStatus.
func (in *CIDRAllocationPoolStatus) DeepCopy() *CIDRAllocationPoolStatus {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolStatus)
out := new(Addon)
in.DeepCopyInto(out)
return out
}
@@ -129,7 +33,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
return
}
@@ -151,6 +55,36 @@ func (in *Cluster) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLimit) DeepCopyInto(out *ClusterLimit) {
*out = *in
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLimit.
func (in *ClusterLimit) DeepCopy() *ClusterLimit {
if in == nil {
return nil
}
out := new(ClusterLimit)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
@@ -184,6 +118,125 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSet) DeepCopyInto(out *ClusterSet) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet.
func (in *ClusterSet) DeepCopy() *ClusterSet {
if in == nil {
return nil
}
out := new(ClusterSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList.
func (in *ClusterSetList) DeepCopy() *ClusterSetList {
if in == nil {
return nil
}
out := new(ClusterSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
*out = *in
if in.MaxLimits != nil {
in, out := &in.MaxLimits, &out.MaxLimits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.DefaultLimits != nil {
in, out := &in.DefaultLimits, &out.DefaultLimits
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec.
func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec {
if in == nil {
return nil
}
out := new(ClusterSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus.
func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus {
if in == nil {
return nil
}
out := new(ClusterSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = *in
@@ -197,6 +250,18 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(int32)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -212,6 +277,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
@@ -238,6 +308,16 @@ func (in *ClusterSpec) DeepCopy() *ClusterSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
**out = **in
}
return
}

View File

@@ -0,0 +1,3 @@
package buildinfo
var Version = "dev"

View File

@@ -0,0 +1,71 @@
package certs
import (
"crypto"
"crypto/x509"
"fmt"
"net"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
)
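// CreateClientCertKey generates a new private key and a certificate signed by
// the given CA cert/key pair. It returns the PEM-encoded certificate (with the
// CA certificate appended) and the PEM-encoded private key.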
func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
}
caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
if err != nil {
return nil, nil, err
}
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: commonName,
Organization: organization,
Usages: extKeyUsage,
ExpiresAt: expiresAt,
}
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
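// generateKey creates a new PEM-encoded elliptic curve private key.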
func generateKey() (data []byte, err error) {
generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
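// AddSANs splits the given SANs into DNS names and IP addresses so they can
// be used as certificate AltNames.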
func AddSANs(sans []string) certutil.AltNames {
var altNames certutil.AltNames
for _, san := range sans {
ip := net.ParseIP(san)
if ip == nil {
altNames.DNSNames = append(altNames.DNSNames, san)
} else {
altNames.IPs = append(altNames.IPs, ip)
}
}
return altNames
}
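// Illustrative usage sketch (not part of this file): issuing a serving
// certificate for a service, assuming caCert and caKey hold PEM-encoded CA
// material obtained elsewhere:
//
//	altNames := certs.AddSANs([]string{"my-service", "10.0.0.1"})
//	cert, key, err := certs.CreateClientCertKey(
//		"my-service", nil, &altNames,
//		[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
//		time.Hour*24*365, caCert, caKey)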

View File

@@ -2,231 +2,27 @@ package agent
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/rancher/k3k/pkg/controller"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const agentName = "k3k-agent"
const (
configName = "agent-config"
)
func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
image := util.K3SImage(cluster)
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + agentName,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.DeploymentSpec{
Replicas: cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
Spec: agentPodSpec(image, agentName, cluster.Spec.AgentArgs, false),
},
},
}
type Agent interface {
Name() string
Config() ctrlruntimeclient.Object
Resources() ([]ctrlruntimeclient.Object, error)
}
func StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
image := util.K3SImage(cluster)
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "Statefulset",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + agentName,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.StatefulSetSpec{
ServiceName: cluster.Name + "-" + agentName + "-headless",
Replicas: cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
Spec: agentPodSpec(image, agentName, cluster.Spec.AgentArgs, true),
},
},
func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token string) Agent {
if cluster.Spec.Mode == VirtualNodeMode {
return NewVirtualAgent(cluster, serviceIP, token)
}
return NewSharedAgent(cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token)
}
func agentPodSpec(image, name string, args []string, statefulSet bool) v1.PodSpec {
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name + "-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "run",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varrun",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibcni",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/",
ReadOnly: false,
},
{
Name: "run",
MountPath: "/run",
ReadOnly: false,
},
{
Name: "varrun",
MountPath: "/var/run",
ReadOnly: false,
},
{
Name: "varlibcni",
MountPath: "/var/lib/cni",
ReadOnly: false,
},
{
Name: "varlibkubelet",
MountPath: "/var/lib/kubelet",
ReadOnly: false,
},
{
Name: "varlibrancherk3s",
MountPath: "/var/lib/rancher/k3s",
ReadOnly: false,
},
{
Name: "varlog",
MountPath: "/var/log",
ReadOnly: false,
},
},
},
},
}
if !statefulSet {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
return podSpec
func configSecretName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, configName)
}

View File

@@ -1,30 +0,0 @@
package agent
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func StatefulAgentService(cluster *v1alpha1.Cluster) *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + agentName + "-headless",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
ClusterIP: v1.ClusterIPNone,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "agent",
},
Ports: []v1.ServicePort{},
},
}
}

View File

@@ -0,0 +1,401 @@
package agent
import (
"crypto"
"crypto/x509"
"fmt"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml"
SharedNodeAgentName = "kubelet"
SharedNodeMode = "shared"
)
type SharedAgent struct {
cluster *v1alpha1.Cluster
serviceIP string
image string
imagePullPolicy string
token string
}
func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, image, imagePullPolicy, token string) Agent {
return &SharedAgent{
cluster: cluster,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
}
}
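// Config returns the Secret holding the shared agent (kubelet) configuration for the cluster.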
func (s *SharedAgent) Config() ctrlruntimeclient.Object {
config := sharedAgentData(s.cluster, s.token, s.Name(), s.serviceIP)
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: configSecretName(s.cluster.Name),
Namespace: s.cluster.Namespace,
},
Data: map[string][]byte{
"config.yaml": []byte(config),
},
}
}
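// sharedAgentData renders the config.yaml consumed by the shared agent. When
// no version is set in the spec, it falls back to the host version recorded in
// the cluster status.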
func sharedAgentData(cluster *v1alpha1.Cluster, token, nodeName, ip string) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
}
return fmt.Sprintf(`clusterName: %s
clusterNamespace: %s
nodeName: %s
agentHostname: %s
serverIP: %s
token: %s
version: %s`,
cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token, version)
}
func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) {
// generate certs for webhook
certSecret, err := s.webhookTLS()
if err != nil {
return nil, err
}
return []ctrlruntimeclient.Object{
s.serviceAccount(),
s.role(),
s.roleBinding(),
s.service(),
s.deployment(),
s.dnsService(),
certSecret}, nil
}
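// deployment returns the Deployment that runs the shared agent for the cluster.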
func (s *SharedAgent) deployment() *apps.Deployment {
labels := map[string]string{
"cluster": s.cluster.Name,
"type": "agent",
"mode": "shared",
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
Labels: labels,
},
Spec: apps.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: s.podSpec(),
},
},
}
}
func (s *SharedAgent) podSpec() v1.PodSpec {
var limit v1.ResourceList
return v1.PodSpec{
ServiceAccountName: s.Name(),
Volumes: []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: configSecretName(s.cluster.Name),
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "webhook-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: WebhookSecretName(s.cluster.Name),
Items: []v1.KeyToPath{
{
Key: "tls.crt",
Path: "tls.crt",
},
{
Key: "tls.key",
Path: "tls.key",
},
{
Key: "ca.crt",
Path: "ca.crt",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: s.Name(),
Image: s.image,
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Resources: v1.ResourceRequirements{
Limits: limit,
},
Args: []string{
"--config",
sharedKubeletConfigPath,
},
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3k/",
ReadOnly: false,
},
{
Name: "webhook-certs",
MountPath: "/opt/rancher/k3k-webhook",
ReadOnly: false,
},
},
Ports: []v1.ContainerPort{
{
Name: "webhook-port",
Protocol: v1.ProtocolTCP,
ContainerPort: 9443,
},
},
},
}}
}
func (s *SharedAgent) service() *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{
"cluster": s.cluster.Name,
"type": "agent",
"mode": "shared",
},
Ports: []v1.ServicePort{
{
Name: "k3s-kubelet-port",
Protocol: v1.ProtocolTCP,
Port: 10250,
},
{
Name: "webhook-server",
Protocol: v1.ProtocolTCP,
Port: 9443,
TargetPort: intstr.FromInt32(9443),
},
},
},
}
}
func (s *SharedAgent) dnsService() *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.DNSName(),
Namespace: s.cluster.Namespace,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{
translate.ClusterNameLabel: s.cluster.Name,
"k8s-app": "kube-dns",
},
Ports: []v1.ServicePort{
{
Name: "dns",
Protocol: v1.ProtocolUDP,
Port: 53,
TargetPort: intstr.FromInt32(53),
},
{
Name: "dns-tcp",
Protocol: v1.ProtocolTCP,
Port: 53,
TargetPort: intstr.FromInt32(53),
},
{
Name: "metrics",
Protocol: v1.ProtocolTCP,
Port: 9153,
TargetPort: intstr.FromInt32(9153),
},
},
},
}
}
func (s *SharedAgent) serviceAccount() *v1.ServiceAccount {
return &v1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
},
}
}
func (s *SharedAgent) role() *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
Kind: "Role",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"k3k.io"},
Resources: []string{"clusters"},
Verbs: []string{"get", "watch", "list"},
},
},
}
}
func (s *SharedAgent) roleBinding() *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "RoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: s.Name(),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: s.Name(),
Namespace: s.cluster.Namespace,
},
},
}
}
func (s *SharedAgent) Name() string {
return controller.SafeConcatNameWithPrefix(s.cluster.Name, SharedNodeAgentName)
}
func (s *SharedAgent) DNSName() string {
return controller.SafeConcatNameWithPrefix(s.cluster.Name, "kube-dns")
}
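// webhookTLS generates a self-signed CA and a serving certificate for the
// agent webhook and packages them into a Secret mounted by the agent pod.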
func (s *SharedAgent) webhookTLS() (*v1.Secret, error) {
// generate CA CERT/KEY
caKeyBytes, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, err
}
caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
if err != nil {
return nil, err
}
cfg := certutil.Config{
CommonName: fmt.Sprintf("k3k-webhook-ca@%d", time.Now().Unix()),
}
caCert, err := certutil.NewSelfSignedCACert(cfg, caKey.(crypto.Signer))
if err != nil {
return nil, err
}
caCertBytes := certutil.EncodeCertPEM(caCert)
// generate webhook cert bundle
altNames := certs.AddSANs([]string{s.Name(), s.cluster.Name})
webhookCert, webhookKey, err := certs.CreateClientCertKey(
s.Name(), nil,
&altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, time.Hour*24*time.Duration(365),
string(caCertBytes),
string(caKeyBytes))
if err != nil {
return nil, err
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: WebhookSecretName(s.cluster.Name),
Namespace: s.cluster.Namespace,
},
Data: map[string][]byte{
"tls.crt": webhookCert,
"tls.key": webhookKey,
"ca.crt": caCertBytes,
"ca.key": caKeyBytes,
},
}, nil
}
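// WebhookSecretName returns the name of the Secret holding the webhook TLS material for the given cluster.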
func WebhookSecretName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "webhook")
}

View File

@@ -0,0 +1,212 @@
package agent
import (
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
VirtualNodeMode = "virtual"
virtualNodeAgentName = "agent"
)
type VirtualAgent struct {
cluster *v1alpha1.Cluster
serviceIP string
token string
}
func NewVirtualAgent(cluster *v1alpha1.Cluster, serviceIP, token string) Agent {
return &VirtualAgent{
cluster: cluster,
serviceIP: serviceIP,
token: token,
}
}
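// Config returns the Secret holding the k3s agent configuration used in virtual mode.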
func (v *VirtualAgent) Config() ctrlruntimeclient.Object {
config := virtualAgentData(v.serviceIP, v.token)
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: configSecretName(v.cluster.Name),
Namespace: v.cluster.Namespace,
},
Data: map[string][]byte{
"config.yaml": []byte(config),
},
}
}
func (v *VirtualAgent) Resources() ([]ctrlruntimeclient.Object, error) {
return []ctrlruntimeclient.Object{v.deployment()}, nil
}
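// virtualAgentData renders the k3s agent config.yaml pointing at the virtual cluster's server.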
func virtualAgentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s
with-node-id: true`, serviceIP, token)
}
func (v *VirtualAgent) deployment() *apps.Deployment {
image := controller.K3SImage(v.cluster)
const name = "k3k-agent"
selector := metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": v.cluster.Name,
"type": "agent",
"mode": "virtual",
},
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: v.Name(),
Namespace: v.cluster.Namespace,
Labels: selector.MatchLabels,
},
Spec: apps.DeploymentSpec{
Replicas: v.cluster.Spec.Agents,
Selector: &selector,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: selector.MatchLabels,
},
Spec: v.podSpec(image, name, v.cluster.Spec.AgentArgs, &selector),
},
},
}
}
func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec {
var limit v1.ResourceList
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: configSecretName(v.cluster.Name),
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "run",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varrun",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibcni",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},
Args: args,
Command: []string{
"/bin/k3s",
},
Resources: v1.ResourceRequirements{
Limits: limit,
},
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/",
ReadOnly: false,
},
{
Name: "run",
MountPath: "/run",
ReadOnly: false,
},
{
Name: "varrun",
MountPath: "/var/run",
ReadOnly: false,
},
{
Name: "varlibcni",
MountPath: "/var/lib/cni",
ReadOnly: false,
},
{
Name: "varlibkubelet",
MountPath: "/var/lib/kubelet",
ReadOnly: false,
},
{
Name: "varlibrancherk3s",
MountPath: "/var/lib/rancher/k3s",
ReadOnly: false,
},
{
Name: "varlog",
MountPath: "/var/log",
ReadOnly: false,
},
},
},
},
}
return podSpec
}
func (v *VirtualAgent) Name() string {
return controller.SafeConcatNameWithPrefix(v.cluster.Name, virtualNodeAgentName)
}

View File

@@ -0,0 +1,435 @@
package cluster
import (
"context"
"errors"
"fmt"
"reflect"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimecontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
maxConcurrentReconciles = 1
defaultClusterCIDR = "10.44.0.0/16"
defaultClusterServiceCIDR = "10.45.0.0/16"
defaultStoragePersistentSize = "1G"
memberRemovalTimeout = time.Minute * 1
)
type ClusterReconciler struct {
DiscoveryClient *discovery.DiscoveryClient
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
SharedAgentImage string
SharedAgentImagePullPolicy string
logger *log.Logger
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, logger *log.Logger) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
}
// initialize a new Reconciler
reconciler := ClusterReconciler{
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
logger: logger.Named(clusterController),
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
WithOptions(ctrlruntimecontroller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
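// Reconcile handles Cluster events: it records the host Kubernetes version in
// the status when no version is set, adds the finalizer and creates the
// cluster resources while the object is live, and on deletion removes the etcd
// pod finalizers and the node-proxy ClusterRoleBinding subject before dropping
// the cluster finalizer.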
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var (
cluster v1alpha1.Cluster
podList v1.PodList
)
log := c.logger.With("Cluster", req.NamespacedName)
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// if the Version is not specified we will try to use the same Kubernetes version as the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
return reconcile.Result{}, err
}
// update Status HostVersion
cluster.Status.HostVersion = fmt.Sprintf("v%s.%s.0-k3s1", hostVersion.Major, hostVersion.Minor)
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
if cluster.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
log.Info("enqueue cluster")
return reconcile.Result{}, c.createCluster(ctx, &cluster, log)
}
// remove finalizer from the server pods and update them.
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: cluster.Namespace}
matchingLabels.ApplyToList(listOpts)
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}
}
}
if err := c.unbindNodeProxyClusterRole(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// remove finalizer from the cluster and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
log.Info("deleting cluster")
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster, log *zap.SugaredLogger) error {
if err := c.validate(cluster); err != nil {
log.Errorw("invalid change", zap.Error(err))
return nil
}
token, err := c.token(ctx, cluster)
if err != nil {
return err
}
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode))
if cluster.Spec.Persistence != nil {
cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
}
}
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultClusterCIDR
}
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
}
log.Info("creating cluster service")
serviceIP, err := c.createClusterService(ctx, cluster, s)
if err != nil {
return err
}
if err := c.createClusterConfigs(ctx, cluster, s, serviceIP); err != nil {
return err
}
// creating statefulsets in case the user chose a persistence type other than ephemeral
if err := c.server(ctx, cluster, s); err != nil {
return err
}
if err := c.agent(ctx, cluster, serviceIP, token); err != nil {
return err
}
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.Ingress != nil {
serverIngress, err := s.Ingress(ctx, c.Client)
if err != nil {
return err
}
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
}
}
bootstrapSecret, err := bootstrap.Generate(ctx, cluster, serviceIP, token)
if err != nil {
return err
}
if err := c.Client.Create(ctx, bootstrapSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
if err := c.bindNodeProxyClusterRole(ctx, cluster); err != nil {
return err
}
return c.Client.Update(ctx, cluster)
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, initServerConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create servers configuration
serverConfig, err := server.Config(false, serviceIP)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster, s *server.Server) (string, error) {
// create cluster service
clusterService := s.Service(cluster)
if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil {
return "", err
}
if err := c.Client.Create(ctx, clusterService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return "", err
}
}
var service v1.Service
objKey := ctrlruntimeclient.ObjectKey{
Namespace: cluster.Namespace,
Name: server.ServiceName(cluster.Name),
}
if err := c.Client.Get(ctx, objKey, &service); err != nil {
return "", err
}
return service.Spec.ClusterIP, nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
// create headless service for the statefulset
serverStatefulService := server.StatefulServerService()
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
ServerStatefulSet, err := server.StatefulServer(ctx)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, ServerStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.ensure(ctx, ServerStatefulSet, false); err != nil {
return err
}
return nil
}
func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
}
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
found := false
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
found = true
}
}
if !found {
clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{
Kind: "ServiceAccount",
Name: subjectName,
Namespace: cluster.Namespace,
})
}
return c.Client.Update(ctx, clusterRoleBinding)
}
func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
}
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
var cleanedSubjects []rbacv1.Subject
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name != subjectName || subject.Namespace != cluster.Namespace {
cleanedSubjects = append(cleanedSubjects, subject)
}
}
// if no subject was removed, all good
if reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) {
return nil
}
clusterRoleBinding.Subjects = cleanedSubjects
return c.Client.Update(ctx, clusterRoleBinding)
}
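// agent builds the agent (shared or virtual, depending on the cluster mode)
// configuration and resources and ensures they exist.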
func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
agent := agent.New(cluster, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
agentsConfig := agent.Config()
agentResources, err := agent.Resources()
if err != nil {
return err
}
agentResources = append(agentResources, agentsConfig)
return c.ensureAll(ctx, cluster, agentResources)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
if cluster.Name == ClusterInvalidName {
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
}
return nil
}
func (c *ClusterReconciler) ensureAll(ctx context.Context, cluster *v1alpha1.Cluster, objs []ctrlruntimeclient.Object) error {
for _, obj := range objs {
if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil {
return err
}
if err := c.ensure(ctx, obj, false); err != nil {
return err
}
}
return nil
}
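// ensure creates the object if it does not exist; otherwise it updates it in
// place, or deletes and recreates it when requiresRecreate is set.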
func (c *ClusterReconciler) ensure(ctx context.Context, obj ctrlruntimeclient.Object, requiresRecreate bool) error {
exists := true
existingObject := obj.DeepCopyObject().(ctrlruntimeclient.Object)
if err := c.Client.Get(ctx, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, existingObject); err != nil {
if !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to get Object(%T): %w", existingObject, err)
}
exists = false
}
if !exists {
// if not exists create object
if err := c.Client.Create(ctx, obj); err != nil {
return err
}
return nil
}
// if it exists, apply an update, or recreate it if necessary
if reflect.DeepEqual(obj.(metav1.Object), existingObject.(metav1.Object)) {
return nil
}
if !requiresRecreate {
if err := c.Client.Update(ctx, obj); err != nil {
return err
}
} else {
// this handles object that needs recreation including configmaps and secrets
if err := c.Client.Delete(ctx, obj); err != nil {
return err
}
if err := c.Client.Create(ctx, obj); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,91 @@
package cluster_test
import (
"context"
"path/filepath"
"testing"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Cluster Controller Suite")
}
var (
testEnv *envtest.Environment
k8s *kubernetes.Clientset
k8sClient client.Client
ctx context.Context
cancel context.CancelFunc
)
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
k8s, err = kubernetes.NewForConfig(cfg)
Expect(err).NotTo(HaveOccurred())
scheme := buildScheme()
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "", "", &log.Logger{SugaredLogger: zap.NewNop().Sugar()})
Expect(err).NotTo(HaveOccurred())
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
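// buildScheme registers the core, apps, networking and k3k API groups into a
// fresh runtime.Scheme for the test client and manager.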
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = appsv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}

View File

@@ -0,0 +1,68 @@
package cluster_test
import (
"context"
"fmt"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Cluster Controller", func() {
Context("creating a Cluster", func() {
var (
namespace string
)
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
When("created with a default spec", func() {
It("should have been created with some defaults", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
expectedHostVersion := fmt.Sprintf("v%s.%s.0-k3s1", serverVersion.Major, serverVersion.Minor)
Eventually(func() string {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Status.HostVersion
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))
})
})
})
})

View File

@@ -1,34 +0,0 @@
package config
import (
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Agent(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentData(serviceIP, cluster.Spec.Token)
return v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "k3k-agent-config",
Namespace: util.ClusterNamespace(cluster),
},
Data: map[string][]byte{
"config.yaml": []byte(config),
},
}
}
func agentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s
with-node-id: true`, serviceIP, token)
}

View File

@@ -1,68 +0,0 @@
package config
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
name := "k3k-server-config"
if init {
name = "k3k-init-server-config"
}
config := serverConfigData(serviceIP, cluster)
if init {
config = initConfigData(cluster)
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: util.ClusterNamespace(cluster),
},
Data: map[string][]byte{
"config.yaml": []byte(config),
},
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster)
}
func initConfigData(cluster *v1alpha1.Cluster) string {
return "cluster-init: true\n" + serverOptions(cluster)
}
func serverOptions(cluster *v1alpha1.Cluster) string {
var opts string
// TODO: generate token if not found
if cluster.Spec.Token != "" {
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Spec.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Spec.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
// TODO: Add extra args to the options
return opts
}

View File

@@ -1,394 +0,0 @@
package cluster
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
EphermalNodesType = "ephermal"
DynamicNodesType = "dynamic"
)
type ClusterReconciler struct {
Client client.Client
Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager) error {
// initialize a new Reconciler
reconciler := ClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
// create a new controller and add it to the manager
// this can be replaced by the new builder functionality in controller-runtime
controller, err := controller.New(clusterController, mgr, controller.Options{
Reconciler: &reconciler,
MaxConcurrentReconciles: 1,
})
if err != nil {
return err
}
return controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if cluster.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
// we create a namespace for each new cluster
var ns v1.Namespace
objKey := client.ObjectKey{
Name: util.ClusterNamespace(&cluster),
}
if err := c.Client.Get(ctx, objKey, &ns); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, util.WrapErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
}
}
klog.Infof("enqueue cluster [%s]", cluster.Name)
return reconcile.Result{}, c.createCluster(ctx, &cluster)
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
klog.Infof("deleting cluster [%s]", cluster.Name)
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
if cluster.Spec.Persistence == nil {
// default to ephemeral nodes
cluster.Spec.Persistence = &v1alpha1.PersistenceConfig{
Type: EphermalNodesType,
}
}
if err := c.Client.Update(ctx, cluster); err != nil {
return util.WrapErr("failed to update cluster with persistence type", err)
}
// create a new namespace for the cluster
if err := c.createNamespace(ctx, cluster); err != nil {
return util.WrapErr("failed to create ns", err)
}
klog.Infof("creating cluster service")
serviceIP, err := c.createClusterService(ctx, cluster)
if err != nil {
return util.WrapErr("failed to create cluster service", err)
}
if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
return util.WrapErr("failed to create cluster configs", err)
}
// creating statefulsets in case the user chose a persistence type other than ephemeral
if cluster.Spec.Persistence.Type != EphermalNodesType {
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Spec.Persistence.StorageRequestSize = "1G"
}
if err := c.createStatefulSets(ctx, cluster); err != nil {
return util.WrapErr("failed to create servers and agents statefulsets", err)
}
} else {
if err := c.createDeployments(ctx, cluster); err != nil {
return util.WrapErr("failed to create servers and agents deployment", err)
}
}
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.Ingress != nil {
serverIngress, err := server.Ingress(ctx, cluster, c.Client)
if err != nil {
return util.WrapErr("failed to create ingress object", err)
}
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create server ingress", err)
}
}
}
}
kubeconfigSecret, err := server.GenerateNewKubeConfig(ctx, cluster, serviceIP)
if err != nil {
return util.WrapErr("failed to generate new kubeconfig", err)
}
if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create kubeconfig secret", err)
}
}
return c.Client.Update(ctx, cluster)
}
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create a new namespace for the cluster
namespace := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: util.ClusterNamespace(cluster),
},
}
if err := controllerutil.SetControllerReference(cluster, &namespace, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, &namespace); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create ns", err)
}
}
return nil
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
// create init node config
initServerConfig, err := config.Server(cluster, true, serviceIP)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, initServerConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create servers configuration
serverConfig, err := config.Server(cluster, false, serviceIP)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create agents configuration
agentsConfig := config.Agent(cluster, serviceIP)
if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, &agentsConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
// create cluster service
clusterService := server.Service(cluster)
if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil {
return "", err
}
if err := c.Client.Create(ctx, clusterService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return "", err
}
}
var service v1.Service
objKey := client.ObjectKey{
Namespace: util.ClusterNamespace(cluster),
Name: "k3k-server-service",
}
if err := c.Client.Get(ctx, objKey, &service); err != nil {
return "", err
}
return service.Spec.ClusterIP, nil
}
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create deployment for the init server
// the init deployment must have only 1 replica
initServerDeployment := server.Server(cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerDeployment, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create deployment for the rest of the servers
serversDeployment := server.Server(cluster, false)
if err := controllerutil.SetControllerReference(cluster, serversDeployment, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serversDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
agentsDeployment := agent.Agent(cluster)
if err := controllerutil.SetControllerReference(cluster, agentsDeployment, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, agentsDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
func (c *ClusterReconciler) createStatefulSets(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create headless service for the init statefulset
initServerStatefulService := server.StatefulServerService(cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create statefulsets for the init server
// the init statefulset must have only 1 replica
initServerStatefulSet := server.StatefulServer(cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create statefulset for the rest of the servers
// create headless service for the server statefulset
serverStatefulService := server.StatefulServerService(cluster, false)
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
serversStatefulSet := server.StatefulServer(cluster, false)
if err := controllerutil.SetControllerReference(cluster, serversStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serversStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create headless service for the agents statefulset
agentStatefulService := agent.StatefulAgentService(cluster)
if err := controllerutil.SetControllerReference(cluster, agentStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, agentStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
agentsStatefulSet := agent.StatefulAgent(cluster)
if err := controllerutil.SetControllerReference(cluster, agentsStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, agentsStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
func (c *ClusterReconciler) createCIDRPools(ctx context.Context) error {
if err := c.Client.Create(ctx, &v1alpha1.CIDRAllocationPool{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
// ignore AlreadyExists errors: the resource was already created
return err
}
}
if err := c.Client.Create(ctx, &v1alpha1.CIDRAllocationPool{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
// ignore AlreadyExists errors: the resource was already created
return err
}
}
return nil
}

View File

@@ -0,0 +1,241 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/log"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
podController = "k3k-pod-controller"
)
type PodReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
logger *log.Logger
}
// Add adds a new controller to the manager
func AddPodController(ctx context.Context, mgr manager.Manager, logger *log.Logger) error {
// initialize a new Reconciler
reconciler := PodReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
logger: logger.Named(podController),
}
return ctrl.NewControllerManagedBy(mgr).
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
Named(podController).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Complete(&reconciler)
}
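// Reconcile maps a server pod event back to its cluster and runs the etcd
// finalizer handling for every server pod in the namespace.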
func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := p.logger.With("Pod", req.NamespacedName)
s := strings.Split(req.Name, "-")
if len(s) < 2 {
return reconcile.Result{}, nil
}
if s[0] != "k3k" {
return reconcile.Result{}, nil
}
clusterName := s[1]
var cluster v1alpha1.Cluster
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
log.Info("Handle etcd server pod")
if err := p.handleServerPod(ctx, cluster, &pod, log); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod, log *zap.SugaredLogger) error {
if _, ok := pod.Labels["role"]; ok {
if pod.Labels["role"] != "server" {
return nil
}
} else {
return fmt.Errorf("server pod has no role label")
}
// if the etcd pod is marked for deletion, we need to remove it from the etcd member list before it is deleted
if !pod.DeletionTimestamp.IsZero() {
// if the cluster is already deleted, just remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster, log)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP, log); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
return p.Client.Update(ctx, pod)
}
return nil
}
func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluster, log *zap.SugaredLogger) (*tls.Config, error) {
log.Infow("generating etcd TLS client certificate", "Cluster", cluster.Name, "Namespace", cluster.Namespace)
token, err := p.clusterToken(ctx, cluster)
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string, log *zap.SugaredLogger) error {
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
log.Infow("Removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
// the token lives in the user-provided secret, mirroring the cluster controller's lookup
nn.Name = cluster.Spec.TokenSecretRef.Name
nn.Namespace = cluster.Spec.TokenSecretRef.Namespace
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}
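
A minimal standalone sketch of the same mTLS wiring getETCDTLS performs above (client certificate plus the virtual cluster's etcd server CA fed into an etcd v3 client). The helper name, PEM inputs and endpoint are placeholders for illustration, not part of the controller; it only uses packages the file already imports.

// newEtcdClient is an illustrative helper: it assumes the caller already has
// the etcd server CA and a client cert/key pair in PEM form.
func newEtcdClient(endpoint string, caPEM, certPEM, keyPEM []byte) (*clientv3.Client, error) {
    clientCert, err := tls.X509KeyPair(certPEM, keyPEM)
    if err != nil {
        return nil, err
    }
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(caPEM) {
        return nil, fmt.Errorf("failed to parse etcd server CA")
    }
    return clientv3.New(clientv3.Config{
        Endpoints: []string{endpoint},
        TLS: &tls.Config{
            RootCAs:      pool,
            Certificates: []tls.Certificate{clientCert},
        },
    })
}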

View File

@@ -0,0 +1,173 @@
package bootstrap
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"net/http"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
)
type ControlRuntimeBootstrap struct {
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"serverCAKey"`
ClientCA content `json:"clientCA"`
ClientCAKey content `json:"clientCAKey"`
ETCDServerCA content `json:"etcdServerCA"`
ETCDServerCAKey content `json:"etcdServerCAKey"`
}
type content struct {
Timestamp string
Content string
}
// Generate generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) (*v1.Secret, error) {
var bootstrap *ControlRuntimeBootstrap
if err := retry.OnError(controller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
bootstrap, err = requestBootstrap(token, ip)
return err
}); err != nil {
return nil, err
}
if err := decodeBootstrap(bootstrap); err != nil {
return nil, err
}
bootstrapData, err := json.Marshal(bootstrap)
if err != nil {
return nil, err
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: cluster.APIVersion,
Kind: cluster.Kind,
Name: cluster.Name,
UID: cluster.UID,
},
},
},
Data: map[string][]byte{
"bootstrap": bootstrapData,
},
}, nil
}
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
client := http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
Timeout: 5 * time.Second,
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var runtimeBootstrap ControlRuntimeBootstrap
if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
return nil, err
}
return &runtimeBootstrap, nil
}
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
//client-ca
decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content)
if err != nil {
return err
}
bootstrap.ClientCA.Content = string(decoded)
//client-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content)
if err != nil {
return err
}
bootstrap.ClientCAKey.Content = string(decoded)
//server-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content)
if err != nil {
return err
}
bootstrap.ServerCA.Content = string(decoded)
//server-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content)
if err != nil {
return err
}
bootstrap.ServerCAKey.Content = string(decoded)
//etcd-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCA.Content)
if err != nil {
return err
}
bootstrap.ETCDServerCA.Content = string(decoded)
//etcd-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCAKey.Content)
if err != nil {
return err
}
bootstrap.ETCDServerCAKey.Content = string(decoded)
return nil
}
func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, err
}
if err := decodeBootstrap(bootstrap); err != nil {
return nil, err
}
return bootstrap, nil
}
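
Generate stores the decoded bootstrap under the "bootstrap" key of a Secret owned by the Cluster. A hedged sketch of reading it back, assuming the secret name follows the same SafeConcatNameWithPrefix(cluster.Name, "bootstrap") convention used above; the helper name is illustrative, and the extra imports (fmt, k8s.io/apimachinery/pkg/types, sigs.k8s.io/controller-runtime/pkg/client) are assumptions.

// bootstrapFromSecret is an illustrative helper, not part of the package: it
// loads the bootstrap data that Generate stored in the cluster's bootstrap secret.
func bootstrapFromSecret(ctx context.Context, cl client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) {
    var secret v1.Secret
    key := types.NamespacedName{
        Name:      controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
        Namespace: cluster.Namespace,
    }
    if err := cl.Get(ctx, key, &secret); err != nil {
        return nil, err
    }
    data, ok := secret.Data["bootstrap"]
    if !ok {
        return nil, fmt.Errorf("no bootstrap key in secret %s/%s", key.Namespace, key.Name)
    }
    var bootstrap ControlRuntimeBootstrap
    if err := json.Unmarshal(data, &bootstrap); err != nil {
        return nil, err
    }
    return &bootstrap, nil
}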

View File

@@ -0,0 +1,83 @@
package server
import (
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
name := configSecretName(s.cluster.Name, init)
s.cluster.Status.TLSSANs = append(s.cluster.Spec.TLSSANs,
serviceIP,
ServiceName(s.cluster.Name),
fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace),
)
config := serverConfigData(serviceIP, s.cluster, s.token)
if init {
config = initConfigData(s.cluster, s.token)
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: s.cluster.Namespace,
},
Data: map[string][]byte{
"config.yaml": []byte(config),
},
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster, token)
}
func initConfigData(cluster *v1alpha1.Cluster, token string) string {
return "cluster-init: true\n" + serverOptions(cluster, token)
}
func serverOptions(cluster *v1alpha1.Cluster, token string) string {
var opts string
// TODO: generate token if not found
if token != "" {
opts = "token: " + token + "\n"
}
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Status.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Status.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
if cluster.Spec.Mode != agent.VirtualNodeMode {
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage"
}
// TODO: Add extra args to the options
return opts
}
func configSecretName(clusterName string, init bool) string {
if !init {
return controller.SafeConcatNameWithPrefix(clusterName, configName)
}
return controller.SafeConcatNameWithPrefix(clusterName, initConfigName)
}
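
Because serverOptions simply concatenates YAML lines, the shape of the generated config.yaml is easy to preview. A small in-package sketch with made-up CIDRs and token; the function name and all values are hypothetical.

// sketchServerConfig shows roughly what serverOptions emits for a shared-mode
// cluster; every value here is made up.
func sketchServerConfig() string {
    cluster := &v1alpha1.Cluster{}
    cluster.Status.ClusterCIDR = "10.42.0.0/16"
    cluster.Status.ServiceCIDR = "10.43.0.0/16"
    cluster.Status.TLSSANs = []string{"k3k-mycluster-service"}
    // the result contains, roughly:
    //   token: dummy-token
    //   cluster-cidr: 10.42.0.0/16
    //   service-cidr: 10.43.0.0/16
    //   tls-san:
    //   - k3k-mycluster-service
    // plus the shared-mode block that disables the agent, servicelb, traefik,
    // metrics-server and local-storage.
    return serverOptions(cluster, "dummy-token")
}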

View File

@@ -3,8 +3,7 @@ package server
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -16,42 +15,44 @@ const (
nginxSSLPassthroughAnnotation = "nginx.ingress.kubernetes.io/ssl-passthrough"
nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol"
nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect"
servicePort = 443
serverPort = 6443
etcdPort = 2379
)
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster, client client.Client) (*networkingv1.Ingress, error) {
addresses, err := util.Addresses(ctx, client)
func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) {
addresses, err := controller.Addresses(ctx, client)
if err != nil {
return nil, err
}
ingressRules := ingressRules(cluster, addresses)
ingressRules := s.ingressRules(addresses)
ingress := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-server-ingress",
Namespace: util.ClusterNamespace(cluster),
Name: controller.SafeConcatNameWithPrefix(s.cluster.Name, "ingress"),
Namespace: s.cluster.Namespace,
},
Spec: networkingv1.IngressSpec{
IngressClassName: &cluster.Spec.Expose.Ingress.IngressClassName,
IngressClassName: &s.cluster.Spec.Expose.Ingress.IngressClassName,
Rules: ingressRules,
},
}
configureIngressOptions(ingress, cluster.Spec.Expose.Ingress.IngressClassName)
configureIngressOptions(ingress, s.cluster.Spec.Expose.Ingress.IngressClassName)
return ingress, nil
}
func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.IngressRule {
func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: cluster.Name + "." + address + wildcardDNS,
Host: s.cluster.Name + "." + address + wildcardDNS,
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
@@ -60,9 +61,9 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
PathType: &pathTypePrefix,
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "k3k-server-service",
Name: ServiceName(s.cluster.Name),
Port: networkingv1.ServiceBackendPort{
Number: 6443,
Number: serverPort,
},
},
},

View File

@@ -1,237 +0,0 @@
package server
import (
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
)
const (
adminCommonName = "system:admin"
port = 6443
)
type controlRuntimeBootstrap struct {
ServerCA content
ServerCAKey content
ClientCA content
ClientCAKey content
}
type content struct {
Timestamp string
Content string
}
// GenerateNewKubeConfig generates the kubeconfig for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- generate client admin cert/key
// 3- use the ca cert from the bootstrap data & admin cert/key to write a new kubeconfig
// 4- save the new kubeconfig as a secret
func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
token := cluster.Spec.Token
var bootstrap *controlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
return true
}, func() error {
var err error
bootstrap, err = requestBootstrap(token, ip)
return err
}); err != nil {
return nil, err
}
if err := decodeBootstrap(bootstrap); err != nil {
return nil, err
}
adminCert, adminKey, err := createClientCertKey(
adminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
if err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", ip, port)
kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
},
Data: map[string][]byte{
"kubeconfig.yaml": kubeconfigData,
},
}, nil
}
func requestBootstrap(token, serverIP string) (*controlRuntimeBootstrap, error) {
url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
client := http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
Timeout: 5 * time.Second,
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var runtimeBootstrap controlRuntimeBootstrap
if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
return nil, err
}
return &runtimeBootstrap, nil
}
func createClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
}
caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
if err != nil {
return nil, nil, err
}
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: commonName,
Organization: organization,
Usages: extKeyUsage,
}
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
func generateKey() (data []byte, err error) {
generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
kubeconfig, err := clientcmd.Write(*config)
if err != nil {
return nil, err
}
return kubeconfig, nil
}
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
func decodeBootstrap(bootstrap *controlRuntimeBootstrap) error {
//client-ca
decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content)
if err != nil {
return err
}
bootstrap.ClientCA.Content = string(decoded)
//client-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content)
if err != nil {
return err
}
bootstrap.ClientCAKey.Content = string(decoded)
//server-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content)
if err != nil {
return err
}
bootstrap.ServerCA.Content = string(decoded)
//server-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content)
if err != nil {
return err
}
bootstrap.ServerCAKey.Content = string(decoded)
return nil
}

View File

@@ -1,77 +1,78 @@
package server
import (
"strconv"
"context"
"strings"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
serverName = "k3k-server"
initServerName = "k3k-init-server"
k3kSystemNamespace = "k3k-system"
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
ServerPort = 6443
EphermalNodesType = "ephermal"
DynamicNodesType = "dynamic"
)
func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
var replicas int32
image := util.K3SImage(cluster)
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
mode string
token string
}
name := serverName
if init {
name = initServerName
}
replicas = *cluster.Spec.Servers - 1
if init {
replicas = 1
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Spec: serverPodSpec(image, name, cluster.Spec.ServerArgs, false),
},
},
func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *Server {
return &Server{
cluster: cluster,
client: client,
token: token,
mode: mode,
}
}
func serverPodSpec(image, name string, args []string, statefulSet bool) v1.PodSpec {
args = append([]string{"server", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
}
podSpec := v1.PodSpec{
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
Volumes: []v1.Volume{
{
Name: "initconfig",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: configSecretName(s.cluster.Name, true),
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name + "-config",
SecretName: configSecretName(s.cluster.Name, false),
Items: []v1.KeyToPath{
{
Key: "config.yaml",
@@ -110,17 +111,39 @@ func serverPodSpec(image, name string, args []string, statefulSet bool) v1.PodSp
{
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: pointer.Bool(true),
Resources: v1.ResourceRequirements{
Limits: limit,
},
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
Command: []string{
"/bin/k3s",
"/bin/sh",
"-c",
`
if [ ${POD_NAME: -1} == 0 ]; then
/bin/k3s server --config /opt/rancher/k3s/init/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
else
/bin/k3s server --config /opt/rancher/k3s/server/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
fi
`,
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/",
MountPath: "/opt/rancher/k3s/server",
ReadOnly: false,
},
{
Name: "initconfig",
MountPath: "/opt/rancher/k3s/init",
ReadOnly: false,
},
{
@@ -157,7 +180,8 @@ func serverPodSpec(image, name string, args []string, statefulSet bool) v1.PodSp
},
},
}
if !statefulSet {
if !persistent {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
@@ -173,92 +197,171 @@ func serverPodSpec(image, name string, args []string, statefulSet bool) v1.PodSp
},
)
}
// add a TCP readiness probe on the k3s API server port
podSpec.Containers[0].ReadinessProbe = &v1.Probe{
InitialDelaySeconds: 60,
FailureThreshold: 5,
TimeoutSeconds: 10,
ProbeHandler: v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromInt(6443),
},
},
}
// only virtual mode needs a privileged server pod; in shared mode it stays unprivileged
if s.mode == agent.VirtualNodeMode {
podSpec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: ptr.To(true),
}
}
return podSpec
}
func StatefulServer(cluster *v1alpha1.Cluster, init bool) *apps.StatefulSet {
var replicas int32
image := util.K3SImage(cluster)
func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error) {
var (
replicas int32
pvClaims []v1.PersistentVolumeClaim
persistent bool
)
image := controller.K3SImage(s.cluster)
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
name := serverName
if init {
name = initServerName
replicas = *s.cluster.Spec.Servers
if s.cluster.Spec.Persistence != nil && s.cluster.Spec.Persistence.Type != EphermalNodesType {
persistent = true
pvClaims = []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: s.cluster.Namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &s.cluster.Spec.Persistence.StorageClassName,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: s.cluster.Namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &s.cluster.Spec.Persistence.StorageClassName,
},
},
}
}
replicas = *cluster.Spec.Servers - 1
if init {
replicas = 1
var volumes []v1.Volume
var volumeMounts []v1.VolumeMount
for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}
nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}
var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, err
}
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: s.cluster.Namespace,
},
Data: make(map[string][]byte, len(addons.Data)),
}
for k, v := range addons.Data {
clusterAddons.Data[k] = v
}
if err := s.client.Create(ctx, &clusterAddons); err != nil {
return nil, err
}
name := "varlibrancherk3smanifests" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)
volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
// changes to this part of the filesystem shouldn't be done manually. The secret should be updated instead.
ReadOnly: true,
}
volumeMounts = append(volumeMounts, volumeMount)
}
selector := metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": s.cluster.Name,
"role": "server",
},
}
podSpec := s.podSpec(image, name, persistent)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
Name: name,
Namespace: s.cluster.Namespace,
Labels: selector.MatchLabels,
},
Spec: apps.StatefulSetSpec{
Replicas: &replicas,
ServiceName: cluster.Name + "-" + name + "-headless",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
},
Replicas: &replicas,
ServiceName: headlessServiceName(s.cluster.Name),
Selector: &selector,
VolumeClaimTemplates: pvClaims,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
Labels: selector.MatchLabels,
},
Spec: serverPodSpec(image, name, cluster.Spec.ServerArgs, true),
Spec: podSpec,
},
},
}
}, nil
}
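
Putting the pieces together, a reconciler would build the Server helper, render the StatefulSet and apply it. A hedged in-package sketch of that call sequence; the function name, the extra imports (runtime, apierrors, controllerutil) and the error handling are illustrative, not the controller's actual code.

// applyServerStatefulSet is an illustrative sketch of how the Server helper
// defined above could be used by a reconciler.
func applyServerStatefulSet(ctx context.Context, cl client.Client, scheme *runtime.Scheme, cluster *v1alpha1.Cluster, token string) error {
    srv := New(cluster, cl, token, string(cluster.Spec.Mode))
    ss, err := srv.StatefulServer(ctx)
    if err != nil {
        return err
    }
    // make the StatefulSet owned by the Cluster so it is garbage collected with it
    if err := controllerutil.SetControllerReference(cluster, ss, scheme); err != nil {
        return err
    }
    if err := cl.Create(ctx, ss); err != nil && !apierrors.IsAlreadyExists(err) {
        return err
    }
    return nil
}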

View File

@@ -1,15 +1,14 @@
package server
import (
"strconv"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
serviceType := v1.ServiceTypeClusterIP
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.NodePort != nil {
@@ -25,8 +24,8 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
Name: ServiceName(s.cluster.Name),
Namespace: cluster.Namespace,
},
Spec: v1.ServiceSpec{
Type: serviceType,
@@ -38,42 +37,67 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: 6443,
Port: serverPort,
},
{
Name: "k3s-service-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: etcdPort,
},
},
},
}
}
func StatefulServerService(cluster *v1alpha1.Cluster, init bool) *v1.Service {
name := serverName
if init {
name = initServerName
}
func (s *Server) StatefulServerService() *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name + "-headless",
Namespace: util.ClusterNamespace(cluster),
Name: headlessServiceName(s.cluster.Name),
Namespace: s.cluster.Namespace,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
ClusterIP: v1.ClusterIPNone,
Selector: map[string]string{
"cluster": cluster.Name,
"cluster": s.cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
Ports: []v1.ServicePort{
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: 6443,
Port: serverPort,
},
{
Name: "k3s-service-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: etcdPort,
},
},
},
}
}
func ServiceName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "service")
}
func headlessServiceName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "service", "headless")
}
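
ServiceName and headlessServiceName make the per-cluster service addresses predictable, which is what the pod controller above relies on when it dials etcd. A small in-package sketch of the cluster-local endpoints implied by these helpers; fmt would be an extra import, and the resulting URLs depend on cluster DNS.

// clusterEndpoints is illustrative only: it derives the in-cluster API and
// etcd endpoints from the service name helpers above.
func clusterEndpoints(clusterName, namespace string) (api, etcd string) {
    api = fmt.Sprintf("https://%s.%s:%d", ServiceName(clusterName), namespace, servicePort)
    etcd = fmt.Sprintf("https://%s.%s:%d", ServiceName(clusterName), namespace, etcdPort)
    return api, etcd
}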

View File

@@ -0,0 +1,98 @@
package cluster
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
if cluster.Spec.TokenSecretRef == nil {
return c.ensureTokenSecret(ctx, cluster)
}
// get token data from secretRef
nn := types.NamespacedName{
Name: cluster.Spec.TokenSecretRef.Name,
Namespace: cluster.Spec.TokenSecretRef.Namespace,
}
var tokenSecret v1.Secret
if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
// check if the secret is already created
var (
tokenSecret v1.Secret
nn = types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
)
if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil {
if !apierrors.IsNotFound(err) {
return "", err
}
}
if tokenSecret.Data != nil {
return string(tokenSecret.Data["token"]), nil
}
c.logger.Info("Token secret is not specified, creating a random token")
token, err := random(16)
if err != nil {
return "", err
}
tokenSecret = TokenSecretObj(token, cluster.Name, cluster.Namespace)
if err := controllerutil.SetControllerReference(cluster, &tokenSecret, c.Scheme); err != nil {
return "", err
}
if err := c.ensure(ctx, &tokenSecret, false); err != nil {
return "", err
}
return token, nil
}
func random(size int) (string, error) {
token := make([]byte, size)
_, err := rand.Read(token)
if err != nil {
return "", err
}
return hex.EncodeToString(token), err
}
func TokenSecretObj(token, name, namespace string) v1.Secret {
return v1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: TokenSecretName(name),
Namespace: namespace,
},
Data: map[string][]byte{
"token": []byte(token),
},
}
}
func TokenSecretName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "token")
}
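
When a pre-existing token should be used instead of a generated one, the lookup above expects a Secret with a token key that the Cluster's spec.tokenSecretRef points at. A hedged sketch of creating such a secret; the helper name, the secret name and the controller-runtime client import are assumptions.

// createUserTokenSecret is an illustrative helper: the resulting secret is
// what cluster.Spec.TokenSecretRef would reference by name and namespace.
func createUserTokenSecret(ctx context.Context, cl client.Client, namespace, token string) error {
    secret := v1.Secret{
        TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
        ObjectMeta: metav1.ObjectMeta{Name: "my-k3k-token", Namespace: namespace},
        Data:       map[string][]byte{"token": []byte(token)},
    }
    return cl.Create(ctx, &secret)
}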

View File

@@ -0,0 +1,332 @@
package clusterset
import (
"context"
"errors"
"reflect"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
clusterSetController = "k3k-clusterset-controller"
allTrafficCIDR = "0.0.0.0/0"
maxConcurrentReconciles = 1
)
type ClusterSetReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
ClusterCIDR string
logger *log.Logger
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string, logger *log.Logger) error {
// initialize a new Reconciler
reconciler := ClusterSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
ClusterCIDR: clusterCIDR,
logger: logger.Named(clusterSetController),
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.ClusterSet{}).
Owns(&networkingv1.NetworkPolicy{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Watches(
&v1.Namespace{},
handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
builder.WithPredicates(namespaceLabelsPredicate()),
).
Watches(
&v1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(sameNamespaceEventHandler(reconciler)),
).
Complete(&reconciler)
}
// namespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var requests []reconcile.Request
var set v1alpha1.ClusterSetList
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetName()))
for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetName(),
},
})
}
return requests
}
}
// sameNamespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the namespace of the changed Cluster
func sameNamespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var requests []reconcile.Request
var set v1alpha1.ClusterSetList
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetNamespace()))
for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetNamespace(),
},
})
}
return requests
}
}
// namespaceLabelsPredicate returns a predicate that will allow a reconciliation if the labels of a Namespace changed
func namespaceLabelsPredicate() predicate.Predicate {
return predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
oldObj := e.ObjectOld.(*v1.Namespace)
newObj := e.ObjectNew.(*v1.Namespace)
return !reflect.DeepEqual(oldObj.Labels, newObj.Labels)
},
}
}
func (c *ClusterSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := c.logger.With("ClusterSet", req.NamespacedName)
var clusterSet v1alpha1.ClusterSet
if err := c.Client.Get(ctx, req.NamespacedName, &clusterSet); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if err := c.reconcileNetworkPolicy(ctx, log, &clusterSet); err != nil {
return reconcile.Result{}, err
}
if err := c.reconcileNamespacePodSecurityLabels(ctx, log, &clusterSet); err != nil {
return reconcile.Result{}, err
}
if err := c.reconcileClusters(ctx, log, &clusterSet); err != nil {
return reconcile.Result{}, err
}
// TODO: Add resource quota for clustersets
// if clusterSet.Spec.MaxLimits != nil {
// quota := v1.ResourceQuota{
// ObjectMeta: metav1.ObjectMeta{
// Name: "clusterset-quota",
// Namespace: clusterSet.Namespace,
// OwnerReferences: []metav1.OwnerReference{
// {
// UID: clusterSet.UID,
// Name: clusterSet.Name,
// APIVersion: clusterSet.APIVersion,
// Kind: clusterSet.Kind,
// },
// },
// },
// }
// quota.Spec.Hard = clusterSet.Spec.MaxLimits
// if err := c.Client.Create(ctx, &quota); err != nil {
// return reconcile.Result{}, fmt.Errorf("unable to create resource quota from cluster set: %w", err)
// }
// }
return reconcile.Result{}, nil
}
func (c *ClusterSetReconciler) reconcileNetworkPolicy(ctx context.Context, log *zap.SugaredLogger, clusterSet *v1alpha1.ClusterSet) error {
log.Info("reconciling NetworkPolicy")
networkPolicy, err := netpol(ctx, c.ClusterCIDR, clusterSet, c.Client)
if err != nil {
return err
}
if err = ctrl.SetControllerReference(clusterSet, networkPolicy, c.Scheme); err != nil {
return err
}
// if disabled then delete the existing network policy
if clusterSet.Spec.DisableNetworkPolicy {
err := c.Client.Delete(ctx, networkPolicy)
return client.IgnoreNotFound(err)
}
// otherwise try to create/update
err = c.Client.Create(ctx, networkPolicy)
if apierrors.IsAlreadyExists(err) {
return c.Client.Update(ctx, networkPolicy)
}
return err
}
func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.ClusterSet, client client.Client) (*networkingv1.NetworkPolicy, error) {
var cidrList []string
if clusterCIDR != "" {
cidrList = []string{clusterCIDR}
} else {
var nodeList v1.NodeList
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
for _, node := range nodeList.Items {
cidrList = append(cidrList, node.Spec.PodCIDRs...)
}
}
return &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: clusterSet.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",
APIVersion: "networking.k8s.io/v1",
},
Spec: networkingv1.NetworkPolicySpec{
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
networkingv1.PolicyTypeEgress,
},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: allTrafficCIDR,
Except: cidrList,
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": clusterSet.Namespace,
},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kube-dns",
},
},
},
},
},
},
},
}, nil
}
func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, log *zap.SugaredLogger, clusterSet *v1alpha1.ClusterSet) error {
log.Info("reconciling Namespace")
var ns v1.Namespace
key := types.NamespacedName{Name: clusterSet.Namespace}
if err := c.Client.Get(ctx, key, &ns); err != nil {
return err
}
newLabels := map[string]string{}
for k, v := range ns.Labels {
newLabels[k] = v
}
// cleanup of old labels
delete(newLabels, "pod-security.kubernetes.io/enforce")
delete(newLabels, "pod-security.kubernetes.io/enforce-version")
delete(newLabels, "pod-security.kubernetes.io/warn")
delete(newLabels, "pod-security.kubernetes.io/warn-version")
// if a PSA level is specified add the proper labels
if clusterSet.Spec.PodSecurityAdmissionLevel != nil {
psaLevel := *clusterSet.Spec.PodSecurityAdmissionLevel
newLabels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
newLabels["pod-security.kubernetes.io/enforce-version"] = "latest"
// skip the 'warn' only for the privileged PSA level
if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
newLabels["pod-security.kubernetes.io/warn"] = string(psaLevel)
newLabels["pod-security.kubernetes.io/warn-version"] = "latest"
}
}
if !reflect.DeepEqual(ns.Labels, newLabels) {
log.Debug("labels changed, updating namespace")
ns.Labels = newLabels
return c.Client.Update(ctx, &ns)
}
return nil
}
func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, log *zap.SugaredLogger, clusterSet *v1alpha1.ClusterSet) error {
log.Info("reconciling Clusters")
var clusters v1alpha1.ClusterList
if err := c.Client.List(ctx, &clusters, ctrlruntimeclient.InNamespace(clusterSet.Namespace)); err != nil {
return err
}
var err error
for _, cluster := range clusters.Items {
oldClusterSpec := cluster.Spec
if cluster.Spec.PriorityClass != clusterSet.Spec.DefaultPriorityClass {
cluster.Spec.PriorityClass = clusterSet.Spec.DefaultPriorityClass
}
if !reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector) {
cluster.Spec.NodeSelector = clusterSet.Spec.DefaultNodeSelector
}
if !reflect.DeepEqual(oldClusterSpec, cluster.Spec) {
// keep updating the remaining clusters even if an error occurred, collecting every error
err = errors.Join(err, c.Client.Update(ctx, &cluster))
}
}
return err
}
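
The reconciler above only acts on the fields that are actually set, so a ClusterSet can opt into any combination of network policy, PSA labels and scheduling defaults. A hedged sketch of creating one that enforces the baseline PSA level along with default scheduling constraints; the function name and all field values are examples, not project defaults.

// createExampleClusterSet is illustrative only: it builds a ClusterSet that
// the reconciler above would translate into namespace labels, a NetworkPolicy
// and per-cluster defaults.
func createExampleClusterSet(ctx context.Context, cl ctrlruntimeclient.Client) error {
    baseline := v1alpha1.BaselinePodSecurityAdmissionLevel
    clusterSet := &v1alpha1.ClusterSet{
        ObjectMeta: metav1.ObjectMeta{Name: "default-set", Namespace: "tenant-a"},
        Spec: v1alpha1.ClusterSetSpec{
            PodSecurityAdmissionLevel: &baseline,
            DefaultPriorityClass:      "k3k-tenant",
            DefaultNodeSelector:       map[string]string{"node-role/tenant": "true"},
        },
    }
    return cl.Create(ctx, clusterSet)
}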

View File

@@ -0,0 +1,88 @@
package clusterset_test
import (
"context"
"path/filepath"
"testing"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "ClusterSet Controller Suite")
}
var (
testEnv *envtest.Environment
k8sClient client.Client
ctx context.Context
cancel context.CancelFunc
)
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
scheme := buildScheme()
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
nopLogger := &log.Logger{SugaredLogger: zap.NewNop().Sugar()}
err = clusterset.Add(ctx, mgr, "", nopLogger)
Expect(err).NotTo(HaveOccurred())
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = appsv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}

View File

@@ -0,0 +1,670 @@
package clusterset_test
import (
"context"
"reflect"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("ClusterSet Controller", func() {
Context("creating a ClusterSet", func() {
var (
namespace string
)
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
When("created with a default spec", func() {
It("should have only the 'shared' allowedNodeTypes", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
})
It("should create a NetworkPolicy", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// look for network policies etc
clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())
spec := clusterSetNetworkPolicy.Spec
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))
// ingress should allow everything
Expect(spec.Ingress).To(ConsistOf(networkingv1.NetworkPolicyIngressRule{}))
// egress should contains some rules
Expect(spec.Egress).To(HaveLen(1))
// allow networking to all external IPs
ipBlockRule := networkingv1.NetworkPolicyPeer{
IPBlock: &networkingv1.IPBlock{CIDR: "0.0.0.0/0"},
}
// allow networking in the same namespace
clusterSetNamespaceRule := networkingv1.NetworkPolicyPeer{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace},
},
}
// allow networking to the "kube-dns" pod in the "kube-system" namespace
kubeDNSRule := networkingv1.NetworkPolicyPeer{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"k8s-app": "kube-dns"},
},
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"},
},
}
Expect(spec.Egress[0].To).To(ContainElements(
ipBlockRule, clusterSetNamespaceRule, kubeDNSRule,
))
})
})
When("created with DisableNetworkPolicy", func() {
It("should not create a NetworkPolicy if true", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DisableNetworkPolicy: true,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait for a bit for the network policy, but it should not be created
Eventually(func() bool {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
err := k8sClient.Get(ctx, key, &networkingv1.NetworkPolicy{})
return apierrors.IsNotFound(err)
}).
MustPassRepeatedly(5).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
It("should delete the NetworkPolicy if changed to false", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// look for network policy
clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())
clusterSet.Spec.DisableNetworkPolicy = true
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait for a bit for the network policy to being deleted
Eventually(func() bool {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
err := k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
return apierrors.IsNotFound(err)
}).
MustPassRepeatedly(5).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
It("should recreate the NetworkPolicy if deleted", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// look for network policy
clusterSetNetworkPolicy := &networkingv1.NetworkPolicy{}
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(context.Background(), key, clusterSetNetworkPolicy)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())
err = k8sClient.Delete(ctx, clusterSetNetworkPolicy)
Expect(err).To(Not(HaveOccurred()))
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
err = k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
Expect(apierrors.IsNotFound(err)).Should(BeTrue())
// wait a bit for the network policy to being recreated
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, clusterSetNetworkPolicy)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeNil())
})
})
When("created specifing the mode", func() {
It("should have the 'virtual' mode if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
v1alpha1.VirtualClusterMode,
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
})
It("should have both modes if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
Expect(allowedModeTypes).To(HaveLen(2))
Expect(allowedModeTypes).To(ContainElements(
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
))
})
It("should fail for a non-existing mode", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
v1alpha1.ClusterMode("non-existing"),
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(HaveOccurred())
})
})
When("created specifing the podSecurityAdmissionLevel", func() {
It("should add and update the proper pod-security labels to the namespace", func() {
var (
privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
baseline = v1alpha1.BaselinePodSecurityAdmissionLevel
restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
)
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var ns corev1.Namespace
// Check privileged
// wait a bit for the namespace to be updated
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "privileged"
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))
// Check baseline
clusterSet.Spec.PodSecurityAdmissionLevel = &baseline
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait a bit for the namespace to be updated
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "baseline"
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "baseline"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "baseline"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))
// Check restricted
clusterSet.Spec.PodSecurityAdmissionLevel = &restricted
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait a bit for the namespace to be updated
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "restricted"
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn", "restricted"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/warn-version", "latest"))
// check cleanup
clusterSet.Spec.PodSecurityAdmissionLevel = nil
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait a bit for the namespace to be updated
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
_, found := ns.Labels["pod-security.kubernetes.io/enforce"]
return found
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeFalse())
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce-version")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))
})
It("should restore the labels if Namespace is updated", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var ns corev1.Namespace
// wait a bit for the namespace to be updated
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "privileged"
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
err = k8sClient.Update(ctx, &ns)
Expect(err).To(Not(HaveOccurred()))
// wait a bit for the namespace to be restored
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "privileged"
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
})
})
When("a cluster in the same namespace is present", func() {
It("should update it if needed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait a bit
Eventually(func() bool {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Spec.PriorityClass == clusterSet.Spec.DefaultPriorityClass
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
It("should update the nodeSelector", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait a bit
Eventually(func() bool {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
It("should update the nodeSelector if changed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
NodeSelector: map[string]string{"label-1": "value-1"},
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Equal(clusterSet.Spec.DefaultNodeSelector))
// update the ClusterSet
clusterSet.Spec.DefaultNodeSelector["label-2"] = "value-2"
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(clusterSet.Spec.DefaultNodeSelector)))
// wait a bit
Eventually(func() bool {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(cluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
// Update the Cluster
cluster.Spec.NodeSelector["label-3"] = "value-3"
err = k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(clusterSet.Spec.DefaultNodeSelector)))
// wait a bit and check it's restored
Eventually(func() bool {
var updatedCluster v1alpha1.Cluster
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, &updatedCluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, clusterSet.Spec.DefaultNodeSelector)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})
When("a cluster in a different namespace is present", func() {
It("should not be update", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
namespace2 := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
err = k8sClient.Create(ctx, namespace2)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace2.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// it should not change!
Eventually(func() bool {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Spec.PriorityClass != clusterSet.Spec.DefaultPriorityClass
}).
MustPassRepeatedly(5).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})
})
})


@@ -0,0 +1,84 @@
package clusterset
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
nodeController = "k3k-node-controller"
)
type NodeReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
ClusterCIDR string
logger *log.Logger
}
// AddNodeController adds a new controller to the manager
func AddNodeController(ctx context.Context, mgr manager.Manager, logger *log.Logger) error {
// initialize a new Reconciler
reconciler := NodeReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
logger: logger.Named(nodeController),
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1.Node{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Named(nodeController).
Complete(&reconciler)
}
func (n *NodeReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := n.logger.With("Node", req.NamespacedName)
var clusterSetList v1alpha1.ClusterSetList
if err := n.Client.List(ctx, &clusterSetList); err != nil {
return reconcile.Result{}, err
}
if len(clusterSetList.Items) <= 0 {
return reconcile.Result{}, nil
}
if err := n.ensureNetworkPolicies(ctx, clusterSetList, log); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetList v1alpha1.ClusterSetList, log *zap.SugaredLogger) error {
var setNetworkPolicy *networkingv1.NetworkPolicy
for _, cs := range clusterSetList.Items {
if cs.Spec.DisableNetworkPolicy {
continue
}
var err error
log.Infow("Updating NetworkPolicy for ClusterSet", "name", cs.Name, "namespace", cs.Namespace)
setNetworkPolicy, err = netpol(ctx, "", &cs, n.Client)
if err != nil {
return err
}
log.Debugw("New NetworkPolicy for clusterset", "name", cs.Name, "namespace", cs.Namespace)
if err := n.Client.Update(ctx, setNetworkPolicy); err != nil {
return err
}
}
return nil
}
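
Illustrative sketch, not part of the diff: wiring the node controller above into a controller-runtime manager. The clusterset import path and the error handling are assumptions; only AddNodeController, v1alpha1.AddToScheme and log.New come from this changeset.

package main

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/clusterset" // import path assumed
	"github.com/rancher/k3k/pkg/log"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	ctx := context.Background()
	logger := log.New(true) // debug logging, see pkg/log/zap.go below

	// The manager needs both the core types (Node) and the k3k types (ClusterSet).
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = v1alpha1.AddToScheme(scheme)

	// Build a manager from the ambient kubeconfig and register the node controller on it.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}
	if err := clusterset.AddNodeController(ctx, mgr, logger); err != nil {
		panic(err)
	}

	// Blocks until the context is cancelled.
	if err := mgr.Start(ctx); err != nil {
		panic(err)
	}
}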


@@ -0,0 +1,101 @@
package controller
import (
"context"
"crypto/sha256"
"encoding/hex"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
namePrefix = "k3k"
k3SImageName = "rancher/k3s"
AdminCommonName = "system:admin"
)
// Backoff is the cluster creation duration backoff
var Backoff = wait.Backoff{
Steps: 5,
Duration: 5 * time.Second,
Factor: 2,
Jitter: 0.1,
}
// K3SImage returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use the same k8s version as the host cluster,
// stored in the Status object. As a last resort it returns the untagged image.
func K3SImage(cluster *v1alpha1.Cluster) string {
if cluster.Spec.Version != "" {
return k3SImageName + ":" + cluster.Spec.Version
}
if cluster.Status.HostVersion != "" {
return k3SImageName + ":" + cluster.Status.HostVersion
}
return k3SImageName
}
func nodeAddress(node *v1.Node) string {
var externalIP string
var internalIP string
for _, ip := range node.Status.Addresses {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
break
}
if ip.Type == "InternalIP" && ip.Address != "" {
internalIP = ip.Address
}
}
if externalIP != "" {
return externalIP
}
return internalIP
}
// Addresses returns the external address of each node, falling back to the internal address when no external one is set.
func Addresses(ctx context.Context, client ctrlruntimeclient.Client) ([]string, error) {
var nodeList v1.NodeList
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
addresses := make([]string, 0, len(nodeList.Items))
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}
return addresses, nil
}
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
func SafeConcatNameWithPrefix(name ...string) string {
return SafeConcatName(append([]string{namePrefix}, name...)...)
}
// SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters
// by cutting the string off at 57 characters and appending a short sha256-based suffix of the full string.
func SafeConcatName(name ...string) string {
fullPath := strings.Join(name, "-")
if len(fullPath) < 64 {
return fullPath
}
digest := sha256.Sum256([]byte(fullPath))
// since we cut the string in the middle, the last kept character may not be valid in a k8s name,
// so we check it and, if necessary, drop it in favor of a longer suffix
c := fullPath[56]
if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5]
}
return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6]
}
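
Illustrative sketch, not part of the diff, of how the helpers above behave (the pkg/controller import path is assumed): K3SImage prefers Spec.Version, then Status.HostVersion, then the bare image, and SafeConcatName keeps generated names under 64 characters.

package main

import (
	"fmt"
	"strings"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller" // import path assumed
)

func main() {
	cluster := &v1alpha1.Cluster{}

	// Explicit version wins.
	cluster.Spec.Version = "v1.26.1-k3s1"
	fmt.Println(controller.K3SImage(cluster)) // rancher/k3s:v1.26.1-k3s1

	// Otherwise fall back to the host version recorded in the Status.
	cluster.Spec.Version = ""
	cluster.Status.HostVersion = "v1.27.1-k3s1"
	fmt.Println(controller.K3SImage(cluster)) // rancher/k3s:v1.27.1-k3s1

	// Short names pass through unchanged; long ones are cut at 57 characters
	// and suffixed with part of a sha256 digest of the full string.
	fmt.Println(controller.SafeConcatNameWithPrefix("mycluster", "bootstrap")) // k3k-mycluster-bootstrap
	fmt.Println(len(controller.SafeConcatName(strings.Repeat("a", 80))))       // 63
}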


@@ -0,0 +1,110 @@
package kubeconfig
import (
"context"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type KubeConfig struct {
AltNames certutil.AltNames
CN string
ORG []string
ExpiryDate time.Duration
}
func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,
}
var bootstrapSecret v1.Secret
if err := client.Get(ctx, nn, &bootstrapSecret); err != nil {
return nil, err
}
bootstrapData := bootstrapSecret.Data["bootstrap"]
if bootstrapData == nil {
return nil, errors.New("empty bootstrap")
}
var bootstrap bootstrap.ControlRuntimeBootstrap
if err := json.Unmarshal(bootstrapData, &bootstrap); err != nil {
return nil, err
}
adminCert, adminKey, err := certs.CreateClientCertKey(
k.CN, k.ORG,
&k.AltNames, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, k.ExpiryDate,
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
if err != nil {
return nil, err
}
// get the server service to extract the right IP
nn = types.NamespacedName{
Name: server.ServiceName(cluster.Name),
Namespace: cluster.Namespace,
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, server.ServerPort)
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
url = fmt.Sprintf("https://%s:%d", hostServerIP, nodePort)
}
kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return kubeconfigData, nil
}
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
kubeconfig, err := clientcmd.Write(*config)
if err != nil {
return nil, err
}
return kubeconfig, nil
}
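
Illustrative sketch, not part of the diff, of a caller producing an admin kubeconfig for a virtual cluster with the type above; the kubeconfig import path, the organization and the certificate lifetime are assumptions.

package example

import (
	"context"
	"os"
	"time"

	certutil "github.com/rancher/dynamiclistener/cert"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/kubeconfig" // import path assumed
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// writeAdminKubeconfig extracts an admin kubeconfig for the given virtual cluster
// and writes it to a file named after the cluster.
func writeAdminKubeconfig(ctx context.Context, c client.Client, cluster *v1alpha1.Cluster, hostServerIP string) error {
	cfg := kubeconfig.KubeConfig{
		CN:         controller.AdminCommonName,
		ORG:        []string{"system:masters"}, // illustrative organization
		ExpiryDate: 365 * 24 * time.Hour,       // illustrative certificate lifetime
		AltNames:   certutil.AltNames{},
	}

	data, err := cfg.Extract(ctx, c, cluster, hostServerIP)
	if err != nil {
		return err
	}

	return os.WriteFile(cluster.Name+"-kubeconfig.yaml", data, 0o600)
}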


@@ -1,64 +0,0 @@
package util
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
namespacePrefix = "k3k-"
k3SImageName = "rancher/k3s"
)
func ClusterNamespace(cluster *v1alpha1.Cluster) string {
return namespacePrefix + cluster.Name
}
func K3SImage(cluster *v1alpha1.Cluster) string {
return k3SImageName + ":" + cluster.Spec.Version
}
func WrapErr(errString string, err error) error {
klog.Errorf("%s: %v", errString, err)
return err
}
func nodeAddress(node *v1.Node) string {
var externalIP string
var internalIP string
for _, ip := range node.Status.Addresses {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
break
}
if ip.Type == "InternalIP" && ip.Address != "" {
internalIP = ip.Address
}
}
if externalIP != "" {
return externalIP
}
return internalIP
}
// return all the nodes external addresses, if not found then return internal addresses
func Addresses(ctx context.Context, client client.Client) ([]string, error) {
var nodeList v1.NodeList
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
addresses := make([]string, len(nodeList.Items))
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}
return addresses, nil
}

pkg/log/zap.go Normal file

@@ -0,0 +1,51 @@
package log
import (
"os"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
type Logger struct {
*zap.SugaredLogger
}
func New(debug bool) *Logger {
return &Logger{newZappLogger(debug).Sugar()}
}
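// WithError, WithField and WithFields are only here to satisfy the
// virtual-kubelet log.Logger interface; the extra context is currently dropped.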
func (l *Logger) WithError(err error) log.Logger {
return l
}
func (l *Logger) WithField(string, interface{}) log.Logger {
return l
}
func (l *Logger) WithFields(field log.Fields) log.Logger {
return l
}
func (l *Logger) Named(name string) *Logger {
l.SugaredLogger = l.SugaredLogger.Named(name)
return l
}
func newZapLogger(debug bool) *zap.Logger {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
if debug {
lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
}
encoder := zapcore.NewJSONEncoder(encCfg)
core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl)
return zap.New(core)
}
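
Illustrative sketch, not part of the diff: the Logger embeds a *zap.SugaredLogger, so the structured Infow/Debugw helpers used by the controllers above are available directly, and Named scopes the log output per controller.

package main

import "github.com/rancher/k3k/pkg/log"

func main() {
	logger := log.New(true) // true enables debug level
	ctrlLog := logger.Named("k3k-node-controller")

	// JSON-encoded structured output on stderr via the embedded SugaredLogger.
	ctrlLog.Infow("Updating NetworkPolicy for ClusterSet", "name", "example", "namespace", "default")
	ctrlLog.Debugw("reconcile detail", "key", "default/example")
}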

scripts/build Executable file

@@ -0,0 +1,24 @@
#!/bin/bash
set -eo pipefail
TAG=$(git describe --tag --always --match="v[0-9]*")
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
TAG="${TAG}-dirty"
fi
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${TAG}\""
echo "Building k3k..."
echo "Current TAG: ${TAG}"
export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-kubelet
# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli
docker build -f package/Dockerfile -t rancher/k3k:dev -t rancher/k3k:${TAG} .
docker build -f package/Dockerfile.kubelet -t rancher/k3k-kubelet:dev -t rancher/k3k-kubelet:${TAG} .

tests/cluster_test.go Normal file

@@ -0,0 +1,88 @@
package k3k_test
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
)
var _ = When("a cluster is installed", func() {
var namespace string
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
It("will be created in shared mode", func() {
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
Version: "v1.26.1-k3s1",
},
}
err := k8sClient.Create(context.Background(), &cluster)
Expect(err).To(Not(HaveOccurred()))
By("checking server and kubelet readiness state")
// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
serverRunning := false
kubeletRunning := false
for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
imageName = strings.Split(imageName, ":")[0] // remove tag
switch imageName {
case "rancher/k3s":
serverRunning = pod.Status.Phase == corev1.PodRunning
case "rancher/k3k-kubelet":
kubeletRunning = pod.Status.Phase == corev1.PodRunning
}
if serverRunning && kubeletRunning {
return true
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
By("checking the existence of the bootstrap secret")
secretName := fmt.Sprintf("k3k-%s-bootstrap", cluster.Name)
Eventually(func() error {
_, err := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretName, v1.GetOptions{})
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})


@@ -0,0 +1,55 @@
package k3k_test
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
memory "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)
type RESTClientGetter struct {
clientconfig clientcmd.ClientConfig
restConfig *rest.Config
discoveryClient discovery.CachedDiscoveryInterface
}
func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) {
clientconfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
if err != nil {
return nil, err
}
restConfig, err := clientconfig.ClientConfig()
if err != nil {
return nil, err
}
dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, err
}
return &RESTClientGetter{
clientconfig: clientconfig,
restConfig: restConfig,
discoveryClient: memory.NewMemCacheClient(dc),
}, nil
}
func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) {
return r.restConfig, nil
}
func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return r.discoveryClient, nil
}
func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil
}
func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return r.clientconfig
}

tests/tests_suite_test.go Normal file

@@ -0,0 +1,179 @@
package k3k_test
import (
"context"
"fmt"
"io"
"maps"
"os"
"path"
"testing"
"time"
"github.com/go-logr/zapr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/k3s"
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func TestTests(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Tests Suite")
}
var (
k3sContainer *k3s.K3sContainer
k8s *kubernetes.Clientset
k8sClient client.Client
)
var _ = BeforeSuite(func() {
var err error
ctx := context.Background()
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.27.1-k3s1")
Expect(err).To(Not(HaveOccurred()))
kubeconfig, err := k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
initKubernetesClient(kubeconfig)
installK3kChart(kubeconfig)
})
func initKubernetesClient(kubeconfig []byte) {
restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
scheme := buildScheme()
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
}
func installK3kChart(kubeconfig []byte) {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))
actionConfig := new(action.Configuration)
restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
releaseName := "k3k"
releaseNamespace := "k3k-system"
err = actionConfig.Init(restClientGetter, releaseNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
fmt.Fprintf(GinkgoWriter, "helm debug: "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(actionConfig)
iCli.ReleaseName = releaseName
iCli.Namespace = releaseNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
iCli.Wait = true
imageMap, _ := k3kChart.Values["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": "rancher/k3k",
"tag": "dev",
"pullPolicy": "IfNotPresent",
})
sharedAgentMap, _ := k3kChart.Values["sharedAgent"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": "rancher/k3k-kubelet",
"tag": "dev",
})
err = k3sContainer.LoadImages(context.Background(), "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))
fmt.Fprintf(GinkgoWriter, "Release %s installed in %s namespace\n", release.Name, release.Namespace)
}
var _ = AfterSuite(func() {
// dump k3s logs
readCloser, err := k3sContainer.Logs(context.Background())
Expect(err).To(Not(HaveOccurred()))
logs, err := io.ReadAll(readCloser)
Expect(err).To(Not(HaveOccurred()))
logfile := path.Join(os.TempDir(), "k3s.log")
err = os.WriteFile(logfile, logs, 0644)
Expect(err).To(Not(HaveOccurred()))
fmt.Fprintln(GinkgoWriter, "k3s logs written to: "+logfile)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
})
var _ = When("k3k is installed", func() {
It("has to be in a Ready state", func() {
// check that at least one Pod is in Ready state
Eventually(func() bool {
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).To(Not(BeEmpty()))
isReady := false
outer:
for _, pod := range podList.Items {
for _, condition := range pod.Status.Conditions {
if condition.Status != corev1.ConditionTrue {
continue
}
if condition.Type == corev1.PodReady {
isReady = true
break outer
}
}
}
return isReady
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}