Compare commits

...

57 Commits

Author SHA1 Message Date
renovate-rancher[bot]
f4cd57b9f5 chore(deps): update github actions (#711)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-23 14:54:13 +01:00
renovate-rancher[bot]
0dbd930292 chore(deps): update github actions (#653)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-23 13:05:15 +01:00
Enrico Candino
9554628fc5 Update virtual-kubelet (v1.12) and Kubernetes deps (v1.35) (#716)
* bump virtual-kubelet and k8s

* bump controller-manager

* fix upgrade-downgrade

* fix kubernetes version

* Update tests_suite_test.go

* removed direct dep of yaml.v2, bump etcd modules
2026-03-23 12:51:43 +01:00
Chris Wayne
78e805889d Merge pull request #717 from macedogm/chore/bump-aquasecurity-trivy-action-v0.35.0
chore(ci): bump aquasecurity/trivy-action to v0.35.0
2026-03-20 10:36:02 -04:00
Guilherme Macedo
34ef69ba50 chore(ci): bump aquasecurity/trivy-action to v0.35.0
Signed-off-by: Guilherme Macedo <guilherme@gmacedo.com>
2026-03-20 11:16:51 -03:00
Kevin McDermott
97a6a61859 Merge pull request #714 from bigkevmcd/dont-start-metricsserver
Don't start the metrics server in tests.
2026-03-20 08:34:10 +00:00
Kevin McDermott
056b36e8b5 Don't start the metrics server in tests.
This prevents the metrics server from starting when testing.

None of the tests check the metrics server.
2026-03-19 14:06:59 +00:00
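For reference, a minimal sketch of how a controller-runtime based test suite can skip the metrics listener (illustrative only; the manager setup and names below are assumptions, not the k3k test code):

```go
package suite_test

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// newTestManager builds a manager that never starts the metrics endpoint.
func newTestManager(cfg *rest.Config) (ctrl.Manager, error) {
	return ctrl.NewManager(cfg, ctrl.Options{
		// BindAddress "0" tells controller-runtime to disable the metrics server.
		Metrics: metricsserver.Options{BindAddress: "0"},
	})
}
```

With the bind address set to "0", no metrics port is ever opened, so parallel test runs cannot collide on it.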
Enrico Candino
c34565da4d update golang grpc module (#712) 2026-03-19 13:40:53 +01:00
Enrico Candino
7b0f695248 Bump some test dependencies and fix lint (#708) 2026-03-18 17:43:41 +01:00
Jonathan Crowther
675ece9edc Merge pull request #704 from JonCrowther/arm64-support
Change build and generate scripts for arm64 compatibility
2026-03-18 12:14:31 -04:00
Jonathan Crowther
733fb345cc Use CompactSequenceIndent feature of yq 2026-03-18 09:21:52 -04:00
renovate-rancher[bot]
0b214e0769 chore(deps): update github actions (#642)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 12:24:18 +01:00
renovate-rancher[bot]
512339440b chore(deps): update dependency go to v1.25.8 (#664)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 11:57:11 +01:00
Kevin McDermott
9d38388c80 Merge pull request #662 from bigkevmcd/change-role-to-worker
Change the nodes type to worker.
2026-03-18 08:02:31 +00:00
Jonathan Crowther
e6f0cb414c Run go generate 2026-03-17 14:01:09 -04:00
Jonathan Crowther
4928ca8925 Use yq instead of sed 2026-03-17 13:55:14 -04:00
Jonathan Crowther
e89a790fc9 Change build and generate scripts for arm64 compatibility 2026-03-17 13:45:06 -04:00
Enrico Candino
7641a1c9c5 Add sync of Host StorageClasses (#681)
* initial impl

* wip test

* fix

* wip tests

* Refactor storage class sync logic and enhance test coverage

* fix test

* remove storageclass sync test

* removed commented code

* added sync to cluster status to apply policy configuration

* fix for storageClass policy indexes

* fix for missing indexed field, and label sync

* - update sync options descriptions for resource types
- added storage class sync tests with policy
- addressed requested changes

* fix for nil map
2026-03-17 16:53:29 +01:00
Kevin McDermott
d975171920 Change the nodes type to worker.
This modifies the configuration of the created nodes via virtual-kubelet
to set the node type to be worker instead of agent.

Bump the Ginkgo version to the latest - allows use of `t.Context()`
rather than creating contexts.

Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2026-03-17 09:28:30 +00:00
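On the `t.Context()` note above, a minimal sketch (assuming Go 1.24+, where `testing.T` gained `Context()`):

```go
package example_test

import "testing"

func TestUsesTestContext(t *testing.T) {
	ctx := t.Context() // cancelled automatically when the test and its cleanups finish
	_ = ctx            // hand ctx to clients and reconcilers instead of creating a context.Background()
}
```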
Hussein Galal
fcb05793b1 Refactor distribution algorithm to account for host capacity (#688)
* Refactor distribution algorithm to account for host capacity

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* simplify the useMilli condition

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* use nodelists instead of passing clients

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* only pass resource maps for both virtual and host nodes

Signed-off-by: hussein <hussein@thinkpad-hussein.hgalal.az>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Signed-off-by: hussein <hussein@thinkpad-hussein.hgalal.az>
Co-authored-by: hussein <hussein@thinkpad-hussein.hgalal.az>
2026-03-12 16:52:37 +02:00
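The commit body above only lists review iterations; as an illustration of the idea (a sketch under assumptions, not the actual k3k implementation), host capacity can be read from each node's allocatable resources, with CPU kept in milli-units so fractional cores survive the arithmetic:

```go
package capacity

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// totalAllocatableCPU sums allocatable CPU across nodes in milli-units,
// so sub-core values such as "500m" are not truncated.
func totalAllocatableCPU(nodes []corev1.Node) *resource.Quantity {
	total := resource.NewMilliQuantity(0, resource.DecimalSI)
	for _, n := range nodes {
		if cpu, ok := n.Status.Allocatable[corev1.ResourceCPU]; ok {
			total.Add(cpu)
		}
	}
	return total
}
```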
Enrico Candino
83b4415f02 refactor: streamline K3S Docker installation and chart setup with dynamic repository handling (#692) 2026-03-12 11:11:12 +01:00
Hussein Galal
cd72bcbc15 use apireader instead of client for node registration in mirror host node (#686)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-03-10 17:43:42 +02:00
Enrico Candino
9836f8376d Added policy in Cluster Status (#663)
* initial implementation

restored policyName

* added test, fixed priority scheduling

* requested changes from review

- wrapped errors
- fixed some kube-api-linter issues to match k8s conventions
- moved policy namespace check in the same condition branch
2026-02-17 16:15:13 +01:00
Andreas Kupries
dba054786e Merge pull request #659 from andreas-kupries/syncer-controller-owner
change ControllerReferences over to OwnerReferences
2026-02-17 11:54:30 +01:00
Andreas Kupries
c94f7c7a30 fix: switch ControllerReferences over to OwnerReferences 2026-02-17 11:01:21 +01:00
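Background on the change (a hedged sketch, not the k3k code): controllerutil exposes both helpers; `SetControllerReference` marks the owner with `controller: true` and allows only one controller owner, while `SetOwnerReference` adds a plain owner reference, so multiple owners can coexist.

```go
package syncer

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ownObject attaches a plain (non-controller) owner reference: the object is
// still garbage-collected with its owner, but other owners may be added too.
func ownObject(owner *corev1.ConfigMap, obj *corev1.Secret, scheme *runtime.Scheme) error {
	// SetControllerReference would instead set controller: true and refuse a second controller owner.
	return controllerutil.SetOwnerReference(owner, obj, scheme)
}
```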
Kevin McDermott
1a16527750 Merge pull request #670 from rancher/bump-chart-version
Release v1.0.2 updates
2026-02-16 14:52:02 +00:00
Kevin McDermott
e7df4ed7f0 Release v1.0.2 updates
Bump the default chart version and update the README with the new
version.
2026-02-16 14:48:24 +00:00
Enrico Candino
9fae02fcbf Pin QEMU setup to use tonistiigi/binfmt:qemu-v10.0.4-56 image (#669) 2026-02-16 15:28:30 +01:00
Enrico Candino
f341f7f5e8 Bump Charts to 1.0.2-rc1 (#652) 2026-01-29 09:55:31 +01:00
renovate-rancher[bot]
ca50a6b231 Update registry.suse.com/bci/bci-base Docker tag to v15.7 (#651)
* Update registry.suse.com/bci/bci-base Docker tag to v15.7

* move k3k controller image to `registry.suse.com/bci/bci-base:15.7`

---------

Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
Co-authored-by: Enrico Candino <enrico.candino@suse.com>
2026-01-28 12:35:28 +01:00
Enrico Candino
004e177ac1 Bump kubernetes dependencies (v1.33) (#647)
* bump kubernetes to v0.33.7

* updated kubernetes api versions

* bump tests

* fix k3s version

* fix test

* centralize k8s version

* remove focus

* revert GetPodCondition, GetContainerStatus and pin of k8s.io/controller-manager
2026-01-27 22:28:56 +01:00
Enrico Candino
0164c785ab Show correct allocatable resources when a Policy is applied (#638)
* wip

* wip

* wip

* fix lint and tests

* fixed bugs for missing resources

* cleanup and refactor

* removed coreClient from configureNode

* added comments to distribute algorithm
2026-01-27 15:56:37 +01:00
Hussein Galal
c1b7da4c72 SecretMounts feature and private registries (#570)
* Add SecretMounts field

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-01-26 21:47:40 +02:00
renovate-rancher[bot]
ff0b03af02 Update Kubernetes dependencies to v1.32.10 [SECURITY] (#626)
* Update Kubernetes dependencies to v1.32.10 [SECURITY]

* bump k8s.io/kubelet

---------

Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
Co-authored-by: Enrico Candino <enrico.candino@suse.com>
2026-01-26 17:24:14 +01:00
Enrico Candino
62a76a8202 Bump testcontainers-go (v0.40.0), containerd (v1.7.30) and x/crypto (v0.45.0) (#640)
* bump testcontainers to v0.40.0

* bump containerd and x/crypto
2026-01-26 16:37:05 +01:00
Enrico Candino
9e841cc75c Update helm.sh/helm/v3 to v3.18.5 (#641)
* bump helm to v3.17.4

* removed unneeded replace

* bump helm to v3.18.5
2026-01-26 15:38:56 +01:00
renovate-rancher[bot]
bc79a2e6a9 Update module github.com/sirupsen/logrus to v1.9.4 (#631)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-26 13:45:23 +01:00
renovate-rancher[bot]
3681614a3e Update dependency golangci/golangci-lint to v2.8.0 (#635)
* Update dependency golangci/golangci-lint to v2.8.0

* bump golangci-lint version in github action

---------

Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
Co-authored-by: Enrico Candino <enrico.candino@suse.com>
2026-01-26 13:30:26 +01:00
renovate-rancher[bot]
f04d88bd3f Update github/codeql-action digest to 38e701f (#634)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-26 10:27:20 +01:00
renovate-rancher[bot]
4b293cef42 Update module go.uber.org/zap to v1.27.1 (#633)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-26 10:19:02 +01:00
renovate-rancher[bot]
1e0aa0ad37 Update module github.com/spf13/cobra to v1.10.2 (#632)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-26 10:18:36 +01:00
renovate-rancher[bot]
e28fa84ae7 Update module github.com/go-logr/logr to v1.4.3 (#629)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-26 10:18:01 +01:00
renovate-rancher[bot]
511be5aa4e Pin dependencies (#628)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-22 15:24:57 +01:00
renovate-rancher[bot]
cd6c962bcf Migrate config .github/renovate.json (#627)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-01-22 15:06:16 +01:00
Kevin McDermott
c0418267c9 Merge pull request #623 from bigkevmcd/resource-quantity
Use resource.Quantity instead of a string for storageRequestSize in the Cluster definition.
2026-01-22 13:13:06 +00:00
Kevin McDermott
eaa20c16e7 Make the storageRequestSize immutable.
It can't be changed in the StatefulSet and modifying the value causes an
error.
2026-01-22 08:27:20 +00:00
jpgouin
0cea0c9e14 Only reconcile the server resource on the StatefulSet Controller (fix #618) 2026-01-21 16:53:52 +01:00
Kevin McDermott
d12f3ea757 Fix lint issues and failing test.
golangci-lint was complaining about duplicate imports of corev1 and their
ordering in the files.
2026-01-21 14:50:30 +00:00
Kevin McDermott
9ea81c861b Use resource.Quantity for storageRequestSize
Previously the resource.Quantity was stored as a string, which allowed
invalid values to be created.

This performs validation on the strings using the standard K8s resource
mechanism.
2026-01-21 14:50:28 +00:00
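A small illustration of what the typed field buys (assuming the CRD field is declared as `resource.Quantity`; the snippet is standalone, not project code): malformed sizes are rejected at parse/admission time instead of surfacing later as a StatefulSet error.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	for _, s := range []string{"2G", "10Gi", "two-gigs"} {
		q, err := resource.ParseQuantity(s)
		if err != nil {
			fmt.Printf("%-8s rejected: %v\n", s, err)
			continue
		}
		fmt.Printf("%-8s ok (canonical %s)\n", s, q.String())
	}
}
```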
Enrico Candino
20c5441030 Bump to Go 1.25 (#620)
* bump to Go 1.25

* add go toolchain
2026-01-21 15:21:34 +01:00
renovate-rancher[bot]
a3a4c931a0 Add initial Renovate configuration (#621)
* Add initial Renovate configuration

* add permission

* fix multiple runs

---------

Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
Co-authored-by: Enrico Candino <enrico.candino@suse.com>
2026-01-21 15:04:51 +01:00
Hussein Galal
fcc7191ab3 CLI cluster update (#595)
* CLI cluster update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-01-20 14:00:24 +02:00
jpgouin
ff6862e511 fix virtual pod NodeSelector #572 (#616) 2026-01-20 11:33:42 +01:00
Peter Matseykanets
20305e03b7 Add a dedicated Validate GitHub Actions workflow (#614) 2026-01-19 10:00:12 -05:00
Enrico Candino
5f42eafd2a Dev doc update (#611)
* update development.md

* fix tests

* fix cli test
2026-01-16 14:56:43 +01:00
Enrico Candino
ccc3d1651c Fixed resource allocation by fetching the stats from the node where the (#610)
virtual-kubelet is running.
Removed random node selection during Pod creation.
2026-01-16 13:23:18 +01:00
Enrico Candino
0185998aa0 Bump Charts to 1.0.2-rc1 (#609) 2026-01-15 14:12:52 +01:00
99 changed files with 5454 additions and 1201 deletions

.github/renovate.json (vendored, new file, 10 lines)
View File

@@ -0,0 +1,10 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"github>rancher/renovate-config#release"
],
"baseBranchPatterns": [
"main"
],
"prHourlyLimit": 2
}

View File

@@ -2,9 +2,9 @@ name: Build
on:
push:
branches:
- main
branches: [main]
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read
@@ -19,18 +19,18 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
with:
distribution: goreleaser
version: v2
@@ -40,7 +40,7 @@ jobs:
REGISTRY: ""
- name: Run Trivy vulnerability scanner (k3kcli)
uses: aquasecurity/trivy-action@0.28.0
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
@@ -50,13 +50,13 @@ jobs:
output: 'trivy-results-k3kcli.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3kcli)
uses: github/codeql-action/upload-sarif@v3
uses: github/codeql-action/upload-sarif@ebcb5b36ded6beda4ceefea6a8bc4cc885255bb3 # v3
with:
sarif_file: trivy-results-k3kcli.sarif
category: k3kcli
- name: Run Trivy vulnerability scanner (k3k)
uses: aquasecurity/trivy-action@0.28.0
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
@@ -66,13 +66,13 @@ jobs:
output: 'trivy-results-k3k.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k)
uses: github/codeql-action/upload-sarif@v3
uses: github/codeql-action/upload-sarif@ebcb5b36ded6beda4ceefea6a8bc4cc885255bb3 # v3
with:
sarif_file: trivy-results-k3k.sarif
category: k3k
- name: Run Trivy vulnerability scanner (k3k-kubelet)
uses: aquasecurity/trivy-action@0.28.0
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
@@ -82,7 +82,7 @@ jobs:
output: 'trivy-results-k3k-kubelet.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet)
uses: github/codeql-action/upload-sarif@v3
uses: github/codeql-action/upload-sarif@ebcb5b36ded6beda4ceefea6a8bc4cc885255bb3 # v3
with:
sarif_file: trivy-results-k3k-kubelet.sarif
category: k3k-kubelet

View File

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
@@ -21,12 +21,12 @@ jobs:
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v4
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
with:
config: .cr.yaml
env:

View File

@@ -2,7 +2,7 @@ name: FOSSA Scanning
on:
push:
branches: ["main", "master", "release/**"]
branches: ["main", "release/**"]
workflow_dispatch:
permissions:
@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
# The FOSSA token is shared between all repos in Rancher's GH org. It can be
# used directly and there is no need to request specific access to EIO.

View File

@@ -24,7 +24,7 @@ jobs:
run: echo "::error::Missing tag from input" && exit 1
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Check if release is draft
run: |

View File

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
@@ -31,12 +31,19 @@ jobs:
run: git checkout ${{ inputs.commit }}
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
with:
image: tonistiigi/binfmt:qemu-v10.0.4-56
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
version: v0.30.1
- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
@@ -55,7 +62,7 @@ jobs:
echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV
- name: Login to container registry
uses: docker/login-action@v3
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.DOCKER_USERNAME }}
@@ -78,7 +85,7 @@ jobs:
echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
with:
distribution: goreleaser
version: v2

.github/workflows/renovate-vault.yml (vendored, new file, 63 lines)
View File

@@ -0,0 +1,63 @@
name: Renovate
on:
workflow_dispatch:
inputs:
logLevel:
description: "Override default log level"
required: false
default: info
type: choice
options:
- info
- debug
overrideSchedule:
description: "Override all schedules"
required: false
default: "false"
type: choice
options:
- "false"
- "true"
configMigration:
description: "Toggle PRs for config migration"
required: false
default: "true"
type: choice
options:
- "false"
- "true"
renovateConfig:
description: "Define a custom renovate config file"
required: false
default: ".github/renovate.json"
type: string
minimumReleaseAge:
description: "Override minimumReleaseAge for a one-time run (e.g., '0 days' to disable delay)"
required: false
default: "null"
type: string
extendsPreset:
description: "Override renovate extends preset (default: 'github>rancher/renovate-config#release')."
required: false
default: "github>rancher/renovate-config#release"
type: string
schedule:
- cron: '30 4,6 * * 1-5'
permissions:
contents: read
id-token: write
jobs:
call-workflow:
uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@release
with:
configMigration: ${{ inputs.configMigration || 'true' }}
logLevel: ${{ inputs.logLevel || 'info' }}
overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }}
renovateConfig: ${{ inputs.renovateConfig || '.github/renovate.json' }}
minimumReleaseAge: ${{ inputs.minimumReleaseAge || 'null' }}
extendsPreset: ${{ inputs.extendsPreset || 'github>rancher/renovate-config#release' }}
secrets:
override-token: "${{ secrets.RENOVATE_FORK_GH_TOKEN || '' }}"

View File

@@ -21,17 +21,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
@@ -131,7 +131,7 @@ jobs:
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: conformance-${{ matrix.type }}-logs

View File

@@ -21,17 +21,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
@@ -39,7 +39,7 @@ jobs:
- name: Install k3s
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
K3S_HOST_VERSION: v1.32.1+k3s1
K3S_HOST_VERSION: v1.35.2+k3s1
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
@@ -104,21 +104,21 @@ jobs:
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive K3s logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: k3s-${{ matrix.type }}-logs
path: /tmp/k3s.log
- name: Archive K3k logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: k3k-${{ matrix.type }}-logs
path: /tmp/k3k.log
- name: Archive conformance logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: conformance-${{ matrix.type }}-logs

View File

@@ -2,42 +2,26 @@ name: Tests E2E
on:
push:
branches: [main]
pull_request:
types: [opened, synchronize, reopened]
workflow_dispatch:
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Pandoc
run: sudo apt-get install pandoc
- name: Validate
run: make validate
tests-e2e:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
@@ -80,44 +64,43 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log
tests-e2e-slow:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
@@ -160,28 +143,28 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: e2e-k3k-logs

View File

@@ -2,56 +2,23 @@ name: Tests
on:
push:
branches: [main]
pull_request:
types: [opened, synchronize, reopened]
workflow_dispatch:
permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: golangci-lint
uses: golangci/golangci-lint-action@v8
with:
args: --timeout=5m
version: v2.3.0
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Pandoc
run: sudo apt-get install pandoc
- name: Validate
run: make validate
tests:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
@@ -59,7 +26,7 @@ jobs:
run: make test-unit
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
@@ -67,16 +34,15 @@ jobs:
tests-cli:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
@@ -112,21 +78,21 @@ jobs:
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{ github.workspace }}/covdata/cover.out
flags: cli
- name: Archive k3s logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: cli-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: cli-k3k-logs

.github/workflows/validate.yml (vendored, new file, 41 lines)
View File

@@ -0,0 +1,41 @@
name: Validate
on:
push:
branches: [main]
pull_request:
types: [opened, synchronize, reopened]
workflow_dispatch:
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version-file: go.mod
cache: true
- name: Install Pandoc
run: sudo apt-get install pandoc
- name: Run linters
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
version: v2.11.3
args: -v
only-new-issues: true
skip-cache: false
- name: Run formatters
run: golangci-lint -v fmt ./...
- name: Validate
run: make validate

View File

@@ -5,8 +5,8 @@ VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
## Dependencies
GOLANGCI_LINT_VERSION := v2.3.0
GINKGO_VERSION ?= v2.21.0
GOLANGCI_LINT_VERSION := v2.11.3
GINKGO_VERSION ?= v2.28.1
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0
@@ -111,14 +111,17 @@ lint: ## Find any linting issues in the project
$(GOLANGCI_LINT) run --timeout=5m
.PHONY: fmt
fmt: ## Find any linting issues in the project
fmt: ## Format source files in the project
ifndef CI
$(GOLANGCI_LINT) fmt ./...
endif
.PHONY: validate
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
$(GINKGO) unfocus
go mod tidy
git status --porcelain
go mod verify
git status --porcelain
git --no-pager diff --exit-code
.PHONY: install

View File

@@ -67,7 +67,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.2/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -75,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli version v1.0.1
k3kcli version v1.0.2
```

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 1.0.1
appVersion: v1.0.1
version: 1.0.2
appVersion: v1.0.2

View File

@@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.0
helm.sh/resource-policy: keep
controller-gen.kubebuilder.io/version: v0.16.0
name: clusters.k3k.io
spec:
group: k3k.io
@@ -55,11 +55,9 @@ spec:
description: Spec defines the desired state of the Cluster.
properties:
addons:
description: Addons specifies secrets containing raw YAML to deploy
on cluster startup.
description: Addons specifies secrets containing raw YAML to deploy on cluster startup.
items:
description: Addon specifies a Secret containing YAML to be deployed
on cluster startup.
description: Addon specifies a Secret containing YAML to be deployed on cluster startup.
properties:
secretNamespace:
description: SecretNamespace is the namespace of the Secret.
@@ -77,14 +75,14 @@ spec:
type: string
type: array
agentEnvs:
description: AgentEnvs specifies list of environment variables to
set in the agent pod.
description: AgentEnvs specifies list of environment variables to set in the agent pod.
items:
description: EnvVar represents an environment variable present in
a Container.
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -99,8 +97,7 @@ spec:
Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value. Cannot
be used if value is not empty.
description: Source for the environment variable's value. Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
@@ -118,8 +115,7 @@ spec:
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
@@ -131,32 +127,64 @@ spec:
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
properties:
apiVersion:
description: Version of the schema the FieldPath is
written in terms of, defaults to "v1".
description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the specified
API version.
description: Path of the field to select in the specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
description: 'Container name: required for volumes, optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the exposed
resources, defaults to "1"
description: Specifies the output format of the exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
@@ -170,8 +198,7 @@ spec:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
default: ""
@@ -183,8 +210,7 @@ spec:
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must
be defined
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
@@ -225,68 +251,59 @@ spec:
- message: clusterDNS is immutable
rule: self == oldSelf
customCAs:
description: CustomCAs specifies the cert/key pairs for custom CA
certificates.
description: CustomCAs specifies the cert/key pairs for custom CA certificates.
properties:
enabled:
default: true
description: Enabled toggles this feature on or off.
type: boolean
sources:
description: Sources defines the sources for all required custom
CA certificates.
description: Sources defines the sources for all required custom CA certificates.
properties:
clientCA:
description: ClientCA specifies the client-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
type: object
etcdPeerCA:
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
pair.
description: ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
type: object
etcdServerCA:
description: ETCDServerCA specifies the etcd-server-ca cert/key
pair.
description: ETCDServerCA specifies the etcd-server-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
type: object
requestHeaderCA:
description: RequestHeaderCA specifies the request-header-ca
cert/key pair.
description: RequestHeaderCA specifies the request-header-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
@@ -296,24 +313,21 @@ spec:
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
type: object
serviceAccountToken:
description: ServiceAccountToken specifies the service-account-token
key.
description: ServiceAccountToken specifies the service-account-token key.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
The secret must contain specific keys based on the credential type:
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
- For the ServiceAccountToken signing key: `tls.key`.
type: string
required:
- secretName
@@ -336,23 +350,19 @@ spec:
By default, it's only exposed as a ClusterIP.
properties:
ingress:
description: Ingress specifies options for exposing the API server
through an Ingress.
description: Ingress specifies options for exposing the API server through an Ingress.
properties:
annotations:
additionalProperties:
type: string
description: Annotations specifies annotations to add to the
Ingress.
description: Annotations specifies annotations to add to the Ingress.
type: object
ingressClassName:
description: IngressClassName specifies the IngressClass to
use for the Ingress.
description: IngressClassName specifies the IngressClass to use for the Ingress.
type: string
type: object
loadBalancer:
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
description: LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
properties:
etcdPort:
description: |-
@@ -370,8 +380,7 @@ spec:
type: integer
type: object
nodePort:
description: NodePort specifies options for exposing the API server
through NodePort.
description: NodePort specifies options for exposing the API server through NodePort.
properties:
etcdPort:
description: |-
@@ -390,10 +399,8 @@ spec:
type: object
type: object
x-kubernetes-validations:
- message: ingress, loadbalancer and nodePort are mutually exclusive;
only one can be set
rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
x).size() <= 1'
- message: ingress, loadbalancer and nodePort are mutually exclusive; only one can be set
rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1'
mirrorHostNodes:
description: |-
MirrorHostNodes controls whether node objects from the host cluster
@@ -434,11 +441,18 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
anyOf:
- type: integer
- type: string
default: 2G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
x-kubernetes-validations:
- message: storageRequestSize is immutable
rule: self == oldSelf
type:
default: dynamic
description: Type specifies the persistence mode.
@@ -449,6 +463,94 @@ spec:
PriorityClass specifies the priorityClassName for server/agent pods.
In "shared" mode, this also applies to workloads.
type: string
secretMounts:
description: |-
SecretMounts specifies a list of secrets to mount into server and agent pods.
Each entry defines a secret and its mount path within the pods.
items:
description: |-
SecretMount defines a secret to be mounted into server or agent pods,
allowing for custom configurations, certificates, or other sensitive data.
properties:
defaultMode:
description: |-
defaultMode is Optional: mode bits used to set permissions on created files by default.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values
for mode bits. Defaults to 0644.
Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
format: int32
type: integer
items:
description: |-
items If unspecified, each key-value pair in the Data field of the referenced
Secret will be projected into the volume as a file whose name is the
key and content is the value. If specified, the listed keys will be
projected into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in the Secret,
the volume setup will error unless it is marked optional. Paths must be
relative and may not contain the '..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: |-
mode is Optional: mode bits used to set permissions on this file.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
If not specified, the volume defaultMode will be used.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
format: int32
type: integer
path:
description: |-
path is the relative path of the file to map the key to.
May not be an absolute path.
May not contain the path element '..'.
May not start with the string '..'.
type: string
required:
- key
- path
type: object
type: array
x-kubernetes-list-type: atomic
mountPath:
description: |-
MountPath is the path within server and agent pods where the
secret contents will be mounted.
type: string
optional:
description: optional field specify whether the Secret or its keys must be defined
type: boolean
role:
description: |-
Role is the type of the k3k pod that will be used to mount the secret.
This can be 'server', 'agent', or 'all' (for both).
enum:
- server
- agent
- all
type: string
secretName:
description: |-
secretName is the name of the secret in the pod's namespace to use.
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
type: string
subPath:
description: |-
SubPath is an optional path within the secret to mount instead of the root.
When specified, only the specified key from the secret will be mounted as a file
at MountPath, keeping the parent directory writable.
type: string
type: object
type: array
serverArgs:
description: |-
ServerArgs specifies ordered key-value pairs for K3s server pods.
@@ -457,14 +559,14 @@ spec:
type: string
type: array
serverEnvs:
description: ServerEnvs specifies list of environment variables to
set in the server pod.
description: ServerEnvs specifies list of environment variables to set in the server pod.
items:
description: EnvVar represents an environment variable present in
a Container.
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -479,8 +581,7 @@ spec:
Defaults to "".
type: string
valueFrom:
description: Source for the environment variable's value. Cannot
be used if value is not empty.
description: Source for the environment variable's value. Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
@@ -498,8 +599,7 @@ spec:
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
description: Specify whether the ConfigMap or its key must be defined
type: boolean
required:
- key
@@ -511,32 +611,64 @@ spec:
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
properties:
apiVersion:
description: Version of the schema the FieldPath is
written in terms of, defaults to "v1".
description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the specified
API version.
description: Path of the field to select in the specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
description: 'Container name: required for volumes, optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the exposed
resources, defaults to "1"
description: Specifies the output format of the exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
@@ -550,8 +682,7 @@ spec:
description: Selects a key of a secret in the pod's namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
description: The key of the secret to select from. Must be a valid secret key.
type: string
name:
default: ""
@@ -563,8 +694,7 @@ spec:
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
optional:
description: Specify whether the Secret or its key must
be defined
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
@@ -605,8 +735,7 @@ spec:
rule: self == oldSelf
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
description: Sync specifies the resources types that will be synced from virtual cluster to host cluster.
properties:
configMaps:
default:
@@ -720,10 +849,28 @@ spec:
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
description: TLSSANs specifies subject alternative names for the K3s server certificate.
items:
type: string
type: array
@@ -733,12 +880,10 @@ spec:
The Secret must have a "token" field in its data.
properties:
name:
description: name is unique within a namespace to reference a
secret resource.
description: name is unique within a namespace to reference a secret resource.
type: string
namespace:
description: namespace defines the space within which the secret
name must be unique.
description: namespace defines the space within which the secret name must be unique.
type: string
type: object
x-kubernetes-map-type: atomic
@@ -769,11 +914,9 @@ spec:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
conditions:
description: Conditions are the individual conditions for the cluster
set.
description: Conditions are the individual conditions for the cluster set.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
lastTransitionTime:
description: |-
@@ -830,13 +973,11 @@ spec:
description: HostVersion is the Kubernetes version of the host node.
type: string
kubeletPort:
description: KubeletPort specefies the port used by k3k-kubelet in
shared mode.
description: KubeletPort specefies the port used by k3k-kubelet in shared mode.
type: integer
phase:
default: Unknown
description: Phase is a high-level summary of the cluster's current
lifecycle state.
description: Phase is a high-level summary of the cluster's current lifecycle state.
enum:
- Pending
- Provisioning
@@ -845,22 +986,174 @@ spec:
- Terminating
- Unknown
type: string
policy:
description: |-
policy represents the status of the policy applied to this cluster.
This field is set by the VirtualClusterPolicy controller.
properties:
name:
description: name is the name of the VirtualClusterPolicy currently applied to this cluster.
minLength: 1
type: string
nodeSelector:
additionalProperties:
type: string
description: nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
type: object
priorityClass:
description: priorityClass is the priority class enforced by the active VirtualClusterPolicy.
type: string
sync:
description: sync is the SyncConfig enforced by the active VirtualClusterPolicy.
properties:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
required:
- name
type: object
policyName:
description: PolicyName specifies the virtual cluster policy name
bound to the virtual cluster.
description: PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
type: string
serviceCIDR:
description: ServiceCIDR is the CIDR range for service IPs.
type: string
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
description: TLSSANs specifies subject alternative names for the K3s server certificate.
items:
type: string
type: array
webhookPort:
description: WebhookPort specefies the port used by webhook in k3k-kubelet
in shared mode.
description: WebhookPort specefies the port used by webhook in k3k-kubelet in shared mode.
type: integer
type: object
type: object

View File

@@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.0
helm.sh/resource-policy: keep
controller-gen.kubebuilder.io/version: v0.16.0
name: virtualclusterpolicies.k3k.io
spec:
group: k3k.io
@@ -51,8 +51,7 @@ spec:
properties:
allowedMode:
default: shared
description: AllowedMode specifies the allowed cluster provisioning
mode. Defaults to "shared".
description: AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
enum:
- shared
- virtual
@@ -63,16 +62,13 @@ spec:
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector specifies the node selector that
applies to all clusters (server + agent) in the target Namespace.
description: DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
type: object
defaultPriorityClass:
description: DefaultPriorityClass specifies the priorityClassName
applied to all pods of all clusters in the target Namespace.
description: DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace.
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy indicates whether to disable the
creation of a default network policy for cluster isolation.
description: DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
type: boolean
limit:
description: |-
@@ -80,11 +76,9 @@ spec:
to set defaults and constraints (min/max)
properties:
limits:
description: Limits is the list of LimitRangeItem objects that
are enforced.
description: Limits is the list of LimitRangeItem objects that are enforced.
items:
description: LimitRangeItem defines a min/max usage limit for
any resource that matches on kind.
description: LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
properties:
default:
additionalProperties:
@@ -93,8 +87,7 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Default resource requirement limit value by
resource name if resource limit is omitted.
description: Default resource requirement limit value by resource name if resource limit is omitted.
type: object
defaultRequest:
additionalProperties:
@@ -103,9 +96,7 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: DefaultRequest is the default resource requirement
request value by resource name if resource request is
omitted.
description: DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
type: object
max:
additionalProperties:
@@ -114,8 +105,7 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Max usage constraints on this kind by resource
name.
description: Max usage constraints on this kind by resource name.
type: object
maxLimitRequestRatio:
additionalProperties:
@@ -124,11 +114,7 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimitRequestRatio if specified, the named
resource must have a request and limit that are both non-zero
where limit divided by request is less than or equal to
the enumerated value; this represents the max burst for
the named resource.
description: MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
type: object
min:
additionalProperties:
@@ -137,8 +123,7 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Min usage constraints on this kind by resource
name.
description: Min usage constraints on this kind by resource name.
type: object
type:
description: Type of resource that this limit applies to.
@@ -152,16 +137,14 @@ spec:
- limits
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel specifies the pod security
admission level applied to the pods in the namespace.
description: PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
enum:
- privileged
- baseline
- restricted
type: string
quota:
description: Quota specifies the resource limits for clusters within
a clusterpolicy.
description: Quota specifies the resource limits for clusters within a clusterpolicy.
properties:
hard:
additionalProperties:
@@ -181,8 +164,7 @@ spec:
For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
properties:
matchExpressions:
description: A list of scope selector requirements by scope
of the resources.
description: A list of scope selector requirements by scope of the resources.
items:
description: |-
A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
@@ -194,8 +176,7 @@ spec:
Valid operators are In, NotIn, Exists, DoesNotExist.
type: string
scopeName:
description: The name of the scope that the selector
applies to.
description: The name of the scope that the selector applies to.
type: string
values:
description: |-
@@ -220,16 +201,14 @@ spec:
A collection of filters that must match each object tracked by a quota.
If not specified, the quota matches all objects.
items:
description: A ResourceQuotaScope defines a filter that must
match each object tracked by a quota
description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type: string
type: array
x-kubernetes-list-type: atomic
type: object
sync:
default: {}
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
description: Sync specifies the resources types that will be synced from virtual cluster to host cluster.
properties:
configMaps:
default:
@@ -343,17 +322,34 @@ spec:
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
type: object
status:
description: Status reflects the observed state of the VirtualClusterPolicy.
properties:
conditions:
description: Conditions are the individual conditions for the cluster
set.
description: Conditions are the individual conditions for the cluster set.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
lastTransitionTime:
description: |-
@@ -407,12 +403,10 @@ spec:
type: object
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated.
description: LastUpdate is the timestamp when the status was last updated.
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
status was updated.
description: ObservedGeneration was the generation at the time the status was updated.
format: int64
type: integer
summary:

View File

@@ -23,9 +23,11 @@ rules:
resources:
- "nodes"
- "nodes/proxy"
- "namespaces"
verbs:
- "get"
- "list"
- "watch"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1

View File

@@ -12,6 +12,7 @@ func NewClusterCmd(appCtx *AppContext) *cobra.Command {
cmd.AddCommand(
NewClusterCreateCmd(appCtx),
NewClusterUpdateCmd(appCtx),
NewClusterDeleteCmd(appCtx),
NewClusterListCmd(appCtx),
)

View File

@@ -13,12 +13,13 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -117,7 +118,10 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
logrus.Infof("Creating cluster '%s' in namespace '%s'", name, namespace)
cluster := newCluster(name, namespace, config)
cluster, err := newCluster(name, namespace, config)
if err != nil {
return err
}
cluster.Spec.Expose = &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
@@ -148,9 +152,9 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
return fmt.Errorf("failed to wait for cluster to be reconciled: %w", err)
}
clusterDetails, err := printClusterDetails(cluster)
clusterDetails, err := getClusterDetails(cluster)
if err != nil {
return fmt.Errorf("failed to print cluster details: %w", err)
return fmt.Errorf("failed to get cluster details: %w", err)
}
logrus.Info(clusterDetails)
@@ -185,7 +189,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
}
}
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
func newCluster(name, namespace string, config *CreateConfig) (*v1beta1.Cluster, error) {
var storageRequestSize *resource.Quantity
if config.storageRequestSize != "" {
parsed, err := resource.ParseQuantity(config.storageRequestSize)
if err != nil {
return nil, err
}
storageRequestSize = ptr.To(parsed)
}
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -211,7 +225,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
StorageRequestSize: storageRequestSize,
},
MirrorHostNodes: config.mirrorHostNodes,
},
@@ -221,7 +235,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
}
if config.token != "" {
cluster.Spec.TokenSecretRef = &v1.SecretReference{
cluster.Spec.TokenSecretRef = &corev1.SecretReference{
Name: k3kcluster.TokenSecretName(name),
Namespace: namespace,
}
@@ -253,11 +267,11 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
}
}
return cluster
return cluster, nil
}
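Since `StorageRequestSize` is now a typed quantity instead of a plain string, the flag value is parsed up front and bad input fails before the Cluster object is created. A standalone sketch of that parsing step, with a made-up flag value:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func main() {
	// Valid quantities such as "3G", "2Gi" or "500Mi" parse cleanly;
	// something like "3gb" is rejected with an error.
	q, err := resource.ParseQuantity("3G")
	if err != nil {
		fmt.Println("invalid size:", err)
		return
	}

	// The cluster spec stores a *resource.Quantity, hence ptr.To.
	size := ptr.To(q)
	fmt.Println(size.String())
}
```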
func env(envSlice []string) []v1.EnvVar {
var envVars []v1.EnvVar
func env(envSlice []string) []corev1.EnvVar {
var envVars []corev1.EnvVar
for _, env := range envSlice {
keyValue := strings.Split(env, "=")
@@ -265,7 +279,7 @@ func env(envSlice []string) []v1.EnvVar {
logrus.Fatalf("incorrect value for environment variable %s", env)
}
envVars = append(envVars, v1.EnvVar{
envVars = append(envVars, corev1.EnvVar{
Name: keyValue[0],
Value: keyValue[1],
})
@@ -352,8 +366,8 @@ func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsP
return nil
}
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
return &v1.Secret{
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
@@ -362,10 +376,10 @@ func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []by
Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
Namespace: clusterNamespace,
},
Type: v1.SecretTypeTLS,
Type: corev1.SecretTypeTLS,
Data: map[string][]byte{
v1.TLSCertKey: cert,
v1.TLSPrivateKeyKey: key,
corev1.TLSCertKey: cert,
corev1.TLSPrivateKeyKey: key,
},
}
}
@@ -399,9 +413,13 @@ const clusterDetailsTemplate = `Cluster details:
Persistence:
Type: {{.Persistence.Type}}{{ if .Persistence.StorageClassName }}
StorageClass: {{ .Persistence.StorageClassName }}{{ end }}{{ if .Persistence.StorageRequestSize }}
Size: {{ .Persistence.StorageRequestSize }}{{ end }}`
Size: {{ .Persistence.StorageRequestSize }}{{ end }}{{ if .Labels }}
Labels: {{ range $key, $value := .Labels }}
{{$key}}: {{$value}}{{ end }}{{ end }}{{ if .Annotations }}
Annotations: {{ range $key, $value := .Annotations }}
{{$key}}: {{$value}}{{ end }}{{ end }}`
func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
func getClusterDetails(cluster *v1beta1.Cluster) (string, error) {
type templateData struct {
Mode v1beta1.ClusterMode
Servers int32
@@ -413,6 +431,8 @@ func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
StorageClassName string
StorageRequestSize string
}
Labels map[string]string
Annotations map[string]string
}
data := templateData{
@@ -421,11 +441,16 @@ func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
Agents: ptr.Deref(cluster.Spec.Agents, 0),
Version: cluster.Spec.Version,
HostVersion: cluster.Status.HostVersion,
Annotations: cluster.Annotations,
Labels: cluster.Labels,
}
data.Persistence.Type = cluster.Spec.Persistence.Type
data.Persistence.StorageClassName = ptr.Deref(cluster.Spec.Persistence.StorageClassName, "")
data.Persistence.StorageRequestSize = cluster.Spec.Persistence.StorageRequestSize
if srs := cluster.Spec.Persistence.StorageRequestSize; srs != nil {
data.Persistence.StorageRequestSize = srs.String()
}
tmpl, err := template.New("clusterDetails").Parse(clusterDetailsTemplate)
if err != nil {

View File

@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -66,7 +67,7 @@ func Test_printClusterDetails(t *testing.T) {
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
StorageClassName: ptr.To("local-path"),
StorageRequestSize: "3gb",
StorageRequestSize: ptr.To(resource.MustParse("3G")),
},
},
Status: v1beta1.ClusterStatus{
@@ -81,13 +82,13 @@ func Test_printClusterDetails(t *testing.T) {
Persistence:
Type: dynamic
StorageClass: local-path
Size: 3gb`,
Size: 3G`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
clusterDetails, err := printClusterDetails(tt.cluster)
clusterDetails, err := getClusterDetails(tt.cluster)
assert.NoError(t, err)
assert.Equal(t, tt.want, clusterDetails)
})

198
cli/cmds/cluster_update.go Normal file
View File

@@ -0,0 +1,198 @@
package cmds
import (
"bufio"
"errors"
"fmt"
"os"
"strings"
"github.com/blang/semver/v4"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
)
type UpdateConfig struct {
servers int32
agents int32
labels []string
annotations []string
version string
noConfirm bool
}
func NewClusterUpdateCmd(appCtx *AppContext) *cobra.Command {
updateConfig := &UpdateConfig{}
cmd := &cobra.Command{
Use: "update",
Short: "Update existing cluster",
Example: "k3kcli cluster update [command options] NAME",
RunE: updateAction(appCtx, updateConfig),
Args: cobra.ExactArgs(1),
}
CobraFlagNamespace(appCtx, cmd.Flags())
updateFlags(cmd, updateConfig)
return cmd
}
func updateFlags(cmd *cobra.Command, cfg *UpdateConfig) {
cmd.Flags().Int32Var(&cfg.servers, "servers", 1, "number of servers")
cmd.Flags().Int32Var(&cfg.agents, "agents", 0, "number of agents")
cmd.Flags().StringArrayVar(&cfg.labels, "labels", []string{}, "Labels to add to the cluster object (e.g. key=value)")
cmd.Flags().StringArrayVar(&cfg.annotations, "annotations", []string{}, "Annotations to add to the cluster object (e.g. key=value)")
cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
cmd.Flags().BoolVarP(&cfg.noConfirm, "no-confirm", "y", false, "Skip interactive approval before applying update")
}
func updateAction(appCtx *AppContext, config *UpdateConfig) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
client := appCtx.Client
name := args[0]
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
namespace := appCtx.Namespace(name)
var virtualCluster v1beta1.Cluster
clusterKey := types.NamespacedName{Name: name, Namespace: appCtx.namespace}
if err := appCtx.Client.Get(ctx, clusterKey, &virtualCluster); err != nil {
if apierrors.IsNotFound(err) {
return fmt.Errorf("cluster %s not found in namespace %s", name, appCtx.namespace)
}
return fmt.Errorf("failed to fetch cluster: %w", err)
}
var changes []change
if cmd.Flags().Changed("version") && config.version != virtualCluster.Spec.Version {
currentVersion := virtualCluster.Spec.Version
if currentVersion == "" {
currentVersion = virtualCluster.Status.HostVersion
}
currentVersionSemver, err := semver.ParseTolerant(currentVersion)
if err != nil {
return fmt.Errorf("failed to parse current cluster version %w", err)
}
newVersionSemver, err := semver.ParseTolerant(config.version)
if err != nil {
return fmt.Errorf("failed to parse new cluster version %w", err)
}
if newVersionSemver.LT(currentVersionSemver) {
return fmt.Errorf("downgrading cluster version is not supported")
}
changes = append(changes, change{"Version", currentVersion, config.version})
virtualCluster.Spec.Version = config.version
}
if cmd.Flags().Changed("servers") {
var oldServers int32
if virtualCluster.Spec.Servers != nil {
oldServers = *virtualCluster.Spec.Servers
}
if oldServers != config.servers {
changes = append(changes, change{"Servers", fmt.Sprintf("%d", oldServers), fmt.Sprintf("%d", config.servers)})
virtualCluster.Spec.Servers = ptr.To(config.servers)
}
}
if cmd.Flags().Changed("agents") {
var oldAgents int32
if virtualCluster.Spec.Agents != nil {
oldAgents = *virtualCluster.Spec.Agents
}
if oldAgents != config.agents {
changes = append(changes, change{"Agents", fmt.Sprintf("%d", oldAgents), fmt.Sprintf("%d", config.agents)})
virtualCluster.Spec.Agents = ptr.To(config.agents)
}
}
var labelChanges []change
if cmd.Flags().Changed("labels") {
oldLabels := labels.Merge(nil, virtualCluster.Labels)
virtualCluster.Labels = labels.Merge(virtualCluster.Labels, parseKeyValuePairs(config.labels, "label"))
labelChanges = diffMaps(oldLabels, virtualCluster.Labels)
}
var annotationChanges []change
if cmd.Flags().Changed("annotations") {
oldAnnotations := labels.Merge(nil, virtualCluster.Annotations)
virtualCluster.Annotations = labels.Merge(virtualCluster.Annotations, parseKeyValuePairs(config.annotations, "annotation"))
annotationChanges = diffMaps(oldAnnotations, virtualCluster.Annotations)
}
if len(changes) == 0 && len(labelChanges) == 0 && len(annotationChanges) == 0 {
logrus.Info("No changes detected, skipping update")
return nil
}
logrus.Infof("Updating cluster '%s' in namespace '%s'", name, namespace)
printDiff(changes)
printMapDiff("Labels", labelChanges)
printMapDiff("Annotations", annotationChanges)
if !config.noConfirm {
if !confirmClusterUpdate(&virtualCluster) {
return nil
}
}
if err := client.Update(ctx, &virtualCluster); err != nil {
return err
}
logrus.Info("Cluster updated successfully")
return nil
}
}
func confirmClusterUpdate(cluster *v1beta1.Cluster) bool {
clusterDetails, err := getClusterDetails(cluster)
if err != nil {
logrus.Fatalf("unable to get cluster details: %v", err)
}
fmt.Printf("\nNew %s\n", clusterDetails)
fmt.Printf("\nDo you want to update the cluster? [y/N]: ")
scanner := bufio.NewScanner(os.Stdin)
if !scanner.Scan() {
if err := scanner.Err(); err != nil {
logrus.Errorf("Error reading input: %v", err)
}
return false
}
fmt.Printf("\n")
return strings.ToLower(strings.TrimSpace(scanner.Text())) == "y"
}
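The downgrade guard above leans on `semver.ParseTolerant`, which accepts the `v` prefix used by k3s version strings. A minimal sketch of the same comparison in isolation (the version strings are examples):

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	current, _ := semver.ParseTolerant("v1.33.1-k3s1")   // leading "v" is tolerated
	requested, _ := semver.ParseTolerant("v1.32.0-k3s1") // lower than current

	if requested.LT(current) {
		fmt.Println("downgrading cluster version is not supported")
		return
	}

	fmt.Println("update allowed:", current, "->", requested)
}
```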

53
cli/cmds/diff_printer.go Normal file
View File

@@ -0,0 +1,53 @@
package cmds
import "fmt"
type change struct {
field string
oldValue string
newValue string
}
func printDiff(changes []change) {
for _, c := range changes {
if c.oldValue == c.newValue {
continue
}
fmt.Printf("%s: %s -> %s\n", c.field, c.oldValue, c.newValue)
}
}
func printMapDiff(title string, changes []change) {
if len(changes) == 0 {
return
}
fmt.Printf("%s:\n", title)
for _, c := range changes {
switch c.oldValue {
case "":
fmt.Printf(" %s=%s (new)\n", c.field, c.newValue)
default:
fmt.Printf(" %s=%s -> %s=%s\n", c.field, c.oldValue, c.field, c.newValue)
}
}
}
func diffMaps(oldMap, newMap map[string]string) []change {
var changes []change
// Check for new and changed keys
for k, newVal := range newMap {
if oldVal, exists := oldMap[k]; exists {
if oldVal != newVal {
changes = append(changes, change{k, oldVal, newVal})
}
} else {
changes = append(changes, change{k, "", newVal})
}
}
return changes
}
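A quick usage sketch for these helpers, assuming it sits in the same `cmds` package (the label values are made up): `diffMaps` reports new and changed keys, and `printMapDiff` renders them.

```go
func exampleDiff() {
	oldLabels := map[string]string{"env": "dev"}
	newLabels := map[string]string{"env": "prod", "team": "platform"}

	changes := diffMaps(oldLabels, newLabels)

	// Prints something like (ordering follows map iteration, so it may vary):
	//   Labels:
	//     env=dev -> env=prod
	//     team=platform (new)
	printMapDiff("Labels", changes)
}
```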

View File

@@ -133,6 +133,40 @@ k3kcli cluster list [command options]
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli cluster update
Update existing cluster
----
k3kcli cluster update [flags]
----
=== Examples
----
k3kcli cluster update [command options] NAME
----
=== Options
----
--agents int32 number of agents
--annotations stringArray Annotations to add to the cluster object (e.g. key=value)
-h, --help help for update
--labels stringArray Labels to add to the cluster object (e.g. key=value)
-n, --namespace string namespace of the k3k cluster
-y, --no-confirm Skip interactive approval before applying update
--servers int32 number of servers (default 1)
--version string k3s version
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli kubeconfig
Manage kubeconfig for clusters.

View File

@@ -21,4 +21,5 @@ K3k cluster command.
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create a new cluster.
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster.
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all existing clusters.
* [k3kcli cluster update](k3kcli_cluster_update.md) - Update existing cluster

View File

@@ -0,0 +1,38 @@
## k3kcli cluster update
Update existing cluster
```
k3kcli cluster update [flags]
```
### Examples
```
k3kcli cluster update [command options] NAME
```
### Options
```
--agents int32 number of agents
--annotations stringArray Annotations to add to the cluster object (e.g. key=value)
-h, --help help for update
--labels stringArray Labels to add to the cluster object (e.g. key=value)
-n, --namespace string namespace of the k3k cluster
-y, --no-confirm Skip interactive approval before applying update
--servers int32 number of servers (default 1)
--version string k3s version
```
### Options inherited from parent commands
```
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.

View File

@@ -41,6 +41,30 @@ _Appears In:_
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy"]
=== AppliedPolicy
AppliedPolicy defines the observed state of an applied policy.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`name`* __string__ | name is the name of the VirtualClusterPolicy currently applied to this cluster. + | | MinLength: 1 +
| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
=== Cluster
@@ -182,6 +206,8 @@ Example: ["--node-name=my-agent-node"] + | |
are mirrored into the virtual cluster. + | |
| *`customCAs`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]__ | CustomCAs specifies the cert/key pairs for custom CA certificates. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
| *`secretMounts`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretmount[$$SecretMount$$] array__ | SecretMounts specifies a list of secrets to mount into server and agent pods. +
Each entry defines a secret and its mount path within the pods. + | |
|===
@@ -192,7 +218,7 @@ are mirrored into the virtual cluster. + | |
ConfigMapSyncConfig specifies the sync options for services.
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
@@ -226,10 +252,9 @@ _Appears In:_
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`secretName`* __string__ | SecretName specifies the name of an existing secret to use. +
The controller expects specific keys inside based on the credential type: +
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'. +
- For ServiceAccountTokenKey: 'tls.key'. + | |
| *`secretName`* __string__ | The secret must contain specific keys based on the credential type: +
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`. +
- For the ServiceAccountToken signing key: `tls.key`. + | |
|===
@@ -328,7 +353,7 @@ _Appears In:_
IngressSyncConfig specifies the sync options for services.
IngressSyncConfig specifies the sync options for Ingresses.
@@ -414,7 +439,7 @@ _Appears In:_
| *`type`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode[$$PersistenceMode$$]__ | Type specifies the persistence mode. + | dynamic |
| *`storageClassName`* __string__ | StorageClassName is the name of the StorageClass to use for the PVC. +
This field is only relevant in "dynamic" mode. + | |
| *`storageRequestSize`* __string__ | StorageRequestSize is the requested size for the PVC. +
| *`storageRequestSize`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#quantity-resource-api[$$Quantity$$]__ | StorageRequestSize is the requested size for the PVC. +
This field is only relevant in "dynamic" mode. + | 2G |
|===
@@ -439,7 +464,7 @@ _Appears In:_
PersistentVolumeClaimSyncConfig specifies the sync options for services.
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
@@ -477,7 +502,7 @@ _Appears In:_
PriorityClassSyncConfig specifies the sync options for services.
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
@@ -494,12 +519,57 @@ then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretmount"]
=== SecretMount
SecretMount defines a secret to be mounted into server or agent pods,
allowing for custom configurations, certificates, or other sensitive data.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`secretName`* __string__ | secretName is the name of the secret in the pod's namespace to use. +
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + | |
| *`items`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#keytopath-v1-core[$$KeyToPath$$] array__ | items If unspecified, each key-value pair in the Data field of the referenced +
Secret will be projected into the volume as a file whose name is the +
key and content is the value. If specified, the listed keys will be +
projected into the specified paths, and unlisted keys will not be +
present. If a key is specified which is not present in the Secret, +
the volume setup will error unless it is marked optional. Paths must be +
relative and may not contain the '..' path or start with '..'. + | |
| *`defaultMode`* __integer__ | defaultMode is Optional: mode bits used to set permissions on created files by default. +
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. +
YAML accepts both octal and decimal values, JSON requires decimal values +
for mode bits. Defaults to 0644. +
Directories within the path are not affected by this setting. +
This might be in conflict with other options that affect the file +
mode, like fsGroup, and the result can be other mode bits set. + | |
| *`optional`* __boolean__ | optional field specify whether the Secret or its keys must be defined + | |
| *`mountPath`* __string__ | MountPath is the path within server and agent pods where the +
secret contents will be mounted. + | |
| *`subPath`* __string__ | SubPath is an optional path within the secret to mount instead of the root. +
When specified, only the specified key from the secret will be mounted as a file +
at MountPath, keeping the parent directory writable. + | |
| *`role`* __string__ | Role is the type of the k3k pod that will be used to mount the secret. +
This can be 'server', 'agent', or 'all' (for both). + | | Enum: [server agent all] +
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig"]
=== SecretSyncConfig
SecretSyncConfig specifies the sync options for services.
SecretSyncConfig specifies the sync options for Secrets.
@@ -521,7 +591,7 @@ then all resources of the given type will be synced. + | |
ServiceSyncConfig specifies the sync options for services.
ServiceSyncConfig specifies the sync options for Services.
@@ -538,6 +608,28 @@ then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig"]
=== StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
=== SyncConfig
@@ -549,6 +641,7 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
@@ -561,6 +654,7 @@ _Appears In:_
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
| *`storageClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig[$$StorageClassSyncConfig$$]__ | StorageClasses resources sync configuration. + | { enabled:false } |
|===

View File

@@ -32,6 +32,25 @@ _Appears in:_
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |
#### AppliedPolicy
AppliedPolicy defines the observed state of an applied policy.
_Appears in:_
- [ClusterStatus](#clusterstatus)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
| `sync` _[SyncConfig](#syncconfig)_ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. | | |
#### Cluster
@@ -135,6 +154,7 @@ _Appears in:_
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
| `secretMounts` _[SecretMount](#secretmount) array_ | SecretMounts specifies a list of secrets to mount into server and agent pods.<br />Each entry defines a secret and its mount path within the pods. | | |
@@ -143,7 +163,7 @@ _Appears in:_
ConfigMapSyncConfig specifies the sync options for services.
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
@@ -170,7 +190,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |
| `secretName` _string_ | The secret must contain specific keys based on the credential type:<br />- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.<br />- For the ServiceAccountToken signing key: `tls.key`. | | |
#### CredentialSources
@@ -251,7 +271,7 @@ _Appears in:_
IngressSyncConfig specifies the sync options for services.
IngressSyncConfig specifies the sync options for Ingresses.
@@ -313,7 +333,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
| `storageRequestSize` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#quantity-resource-api)_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
#### PersistenceMode
@@ -333,7 +353,7 @@ _Appears in:_
PersistentVolumeClaimSyncConfig specifies the sync options for services.
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
@@ -364,7 +384,7 @@ _Appears in:_
PriorityClassSyncConfig specifies the sync options for services.
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
@@ -377,11 +397,34 @@ _Appears in:_
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SecretMount
SecretMount defines a secret to be mounted into server or agent pods,
allowing for custom configurations, certificates, or other sensitive data.
_Appears in:_
- [ClusterSpec](#clusterspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretName` _string_ | secretName is the name of the secret in the pod's namespace to use.<br />More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | | |
| `items` _[KeyToPath](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#keytopath-v1-core) array_ | items If unspecified, each key-value pair in the Data field of the referenced<br />Secret will be projected into the volume as a file whose name is the<br />key and content is the value. If specified, the listed keys will be<br />projected into the specified paths, and unlisted keys will not be<br />present. If a key is specified which is not present in the Secret,<br />the volume setup will error unless it is marked optional. Paths must be<br />relative and may not contain the '..' path or start with '..'. | | |
| `defaultMode` _integer_ | defaultMode is Optional: mode bits used to set permissions on created files by default.<br />Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.<br />YAML accepts both octal and decimal values, JSON requires decimal values<br />for mode bits. Defaults to 0644.<br />Directories within the path are not affected by this setting.<br />This might be in conflict with other options that affect the file<br />mode, like fsGroup, and the result can be other mode bits set. | | |
| `optional` _boolean_ | optional field specify whether the Secret or its keys must be defined | | |
| `mountPath` _string_ | MountPath is the path within server and agent pods where the<br />secret contents will be mounted. | | |
| `subPath` _string_ | SubPath is an optional path within the secret to mount instead of the root.<br />When specified, only the specified key from the secret will be mounted as a file<br />at MountPath, keeping the parent directory writable. | | |
| `role` _string_ | Role is the type of the k3k pod that will be used to mount the secret.<br />This can be 'server', 'agent', or 'all' (for both). | | Enum: [server agent all] <br /> |
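To make the SecretMount fields concrete, here is a hypothetical sketch in Go; the struct field names are assumed to mirror the documented keys (`SecretName`, `MountPath`, `SubPath`, `Role`) and may differ in the generated types:

```go
package main

import "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

// exampleSecretMount mounts a single key from an existing secret into server
// pods only. All names here are illustrative, not taken from the repository.
func exampleSecretMount() v1beta1.SecretMount {
	return v1beta1.SecretMount{
		SecretName: "registry-config",  // existing secret in the cluster namespace
		MountPath:  "/etc/rancher/k3s", // path inside the k3k pods
		SubPath:    "registries.yaml",  // mount one key, keep the parent dir writable
		Role:       "server",           // "server", "agent", or "all"
	}
}
```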
#### SecretSyncConfig
SecretSyncConfig specifies the sync options for services.
SecretSyncConfig specifies the sync options for Secrets.
@@ -398,7 +441,7 @@ _Appears in:_
ServiceSyncConfig specifies the sync options for services.
ServiceSyncConfig specifies the sync options for Services.
@@ -411,6 +454,23 @@ _Appears in:_
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SyncConfig
@@ -420,6 +480,7 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears in:_
- [AppliedPolicy](#appliedpolicy)
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
@@ -431,6 +492,7 @@ _Appears in:_
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
| `storageClasses` _[StorageClassSyncConfig](#storageclasssyncconfig)_ | StorageClasses resources sync configuration. | \{ enabled:false \} | |
#### VirtualClusterPolicy

View File

@@ -11,6 +11,18 @@ To start developing K3k you will need:
- A running Kubernetes cluster
> [!IMPORTANT]
>
> Virtual clusters in shared mode need to have a configured storage provider, unless the `--persistence-type ephemeral` flag is used.
>
> To install the [`local-path-provisioner`](https://github.com/rancher/local-path-provisioner) and set it as the default storage class you can run:
>
> ```
> kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.34/deploy/local-path-storage.yaml
> kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
> ```
### TLDR
```shell
@@ -43,9 +55,13 @@ To see all the available Make commands you can run `make help`, i.e:
test-controller Run the controller tests (pkg/controller)
test-kubelet-controller Run the controller tests (pkg/controller)
test-e2e Run the e2e tests
test-cli Run the cli tests
generate Generate the CRDs specs
docs Build the CRDs and CLI docs
docs-crds Build the CRDs docs
docs-cli Build the CLI docs
lint Find any linting issues in the project
fmt Format source files in the project
validate Validate the project checking for any dependency or doc mismatch
install Install K3k with Helm on the targeted Kubernetes cluster
help Show this help.
@@ -80,7 +96,20 @@ Once you have your images available you can install K3k with the `make install`
## Tests
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`).
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`, `test-cli`).
When running the tests the namespaces used are cleaned up. If you want to keep them for debugging you can set the `KEEP_NAMESPACES` environment variable, e.g.:
```
KEEP_NAMESPACES=true make test-e2e
```
The e2e and cli tests run against the cluster configured in your KUBECONFIG environment variable. Running the tests with the `K3K_DOCKER_INSTALL` environment variable set will use `testcontainers` instead:
```
K3K_DOCKER_INSTALL=true make test-e2e
```
We use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
@@ -153,3 +182,7 @@ Last thing to do is to get the kubeconfig to connect to the virtual cluster we'v
```bash
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
```
> [!IMPORTANT]
> Because of technical limitations it is not possible to create virtual clusters in `virtual` mode with K3d, or any other dockerized environment (Kind, Minikube)

View File

@@ -167,7 +167,7 @@ kind: Cluster
metadata:
name: k3kcluster-custom-k8s
spec:
version: "v1.33.1-k3s1"
version: "v1.35.2-k3s1"
```
This sets the virtual cluster's Kubernetes version explicitly.
@@ -178,7 +178,7 @@ This sets the virtual cluster's Kubernetes version explicitly.
```sh
k3kcli cluster create \
--version v1.33.1-k3s1 \
--version v1.35.2-k3s1 \
k3kcluster-custom-k8s
```

225
go.mod
View File

@@ -1,58 +1,53 @@
module github.com/rancher/k3k
go 1.24.10
go 1.25.0
replace (
github.com/google/cel-go => github.com/google/cel-go v0.20.1
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
github.com/prometheus/common => github.com/prometheus/common v0.64.0
golang.org/x/term => golang.org/x/term v0.15.0
)
toolchain go1.25.8
require (
github.com/go-logr/logr v1.4.2
github.com/blang/semver/v4 v4.0.0
github.com/go-logr/logr v1.4.3
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/onsi/ginkgo/v2 v2.28.1
github.com/onsi/gomega v1.39.1
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.10.1
github.com/sirupsen/logrus v1.9.4
github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
go.etcd.io/etcd/api/v3 v3.5.16
go.etcd.io/etcd/client/v3 v3.5.16
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.31.13
k8s.io/apiextensions-apiserver v0.31.13
k8s.io/apimachinery v0.31.13
k8s.io/apiserver v0.31.13
k8s.io/cli-runtime v0.31.13
k8s.io/client-go v0.31.13
k8s.io/component-base v0.31.13
k8s.io/component-helpers v0.31.13
k8s.io/kubectl v0.31.13
k8s.io/kubelet v0.31.13
k8s.io/kubernetes v1.31.13
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.19.4
github.com/testcontainers/testcontainers-go v0.41.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0
github.com/virtual-kubelet/virtual-kubelet v1.12.0
go.etcd.io/etcd/api/v3 v3.6.9
go.etcd.io/etcd/client/v3 v3.6.9
go.uber.org/zap v1.27.1
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.18.5
k8s.io/api v0.35.1
k8s.io/apiextensions-apiserver v0.35.1
k8s.io/apimachinery v0.35.1
k8s.io/apiserver v0.35.1
k8s.io/cli-runtime v0.35.1
k8s.io/client-go v0.35.1
k8s.io/component-base v0.35.1
k8s.io/component-helpers v0.35.1
k8s.io/kubectl v0.35.1
k8s.io/kubelet v0.35.1
k8s.io/kubernetes v1.35.1
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5
sigs.k8s.io/controller-runtime v0.23.3
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
cel.dev/expr v0.25.1 // indirect
dario.cat/mergo v1.0.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -60,36 +55,33 @@ require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/containerd v1.7.24 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/containerd v1.7.30 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/cyphar/filepath-securejoin v0.6.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v27.1.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/docker v28.5.2+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/ebitengine/purego v0.10.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -104,33 +96,31 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/magiconair/properties v1.8.10 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
@@ -139,91 +129,90 @@ require (
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/go-archive v0.2.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/rubenv/sql-migrate v1.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
github.com/shirou/gopsutil/v4 v4.26.2 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.9 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.41.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.41.0 // indirect
go.opentelemetry.io/otel/sdk v1.41.0 // indirect
go.opentelemetry.io/otel/trace v1.41.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.35.0 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.51.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/term v0.40.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.41.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
google.golang.org/grpc v1.67.3 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/grpc v1.79.3 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/controller-manager v0.35.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.13 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
k8s.io/kms v0.35.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
oras.land/oras-go/v2 v2.6.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

go.sum: 678 changes (diff suppressed because it is too large)

@@ -100,7 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -76,6 +76,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
@@ -113,6 +114,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
@@ -146,6 +148,7 @@ var ConfigMapTests = func() {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
Expect(err).NotTo(HaveOccurred())
return hostConfigMap.Labels
}).
WithPolling(time.Millisecond * 300).
@@ -172,6 +175,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() error {
@@ -192,6 +196,7 @@ var ConfigMapTests = func() {
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
@@ -221,12 +226,14 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
var hostConfigMap v1.ConfigMap
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
GinkgoWriter.Printf("error: %v", err)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).


@@ -98,7 +98,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
syncedIngress := r.ingress(&virtIngress)
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -107,6 +107,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
@@ -172,6 +173,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
@@ -205,6 +207,7 @@ var IngressTests = func() {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
Expect(err).NotTo(HaveOccurred())
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
}).
WithPolling(time.Millisecond * 300).
@@ -256,6 +259,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() error {
@@ -280,6 +284,7 @@ var IngressTests = func() {
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
@@ -335,11 +340,13 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).


@@ -98,7 +98,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -85,6 +85,7 @@ var PVCTests = func() {
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
var hostPVC v1.PersistentVolumeClaim
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
Eventually(func() error {
@@ -102,6 +103,7 @@ var PVCTests = func() {
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
var virtualPV v1.PersistentVolume
key := client.ObjectKey{Name: pvc.Name}
err = virtTestEnv.k8sClient.Get(ctx, key, &virtualPV)


@@ -81,6 +81,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
@@ -113,6 +114,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
@@ -144,6 +146,7 @@ var PriorityClassTests = func() {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
Expect(err).NotTo(HaveOccurred())
return hostPriorityClass.Labels
}).
WithPolling(time.Millisecond * 300).
@@ -165,6 +168,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
@@ -185,6 +189,7 @@ var PriorityClassTests = func() {
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
@@ -207,6 +212,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() error {
@@ -242,11 +248,13 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
var hostPriorityClass schedulingv1.PriorityClass
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).


@@ -117,7 +117,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque
hostPriorityClass := r.translatePriorityClass(priorityClass)
if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -100,7 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
syncedSecret := s.translateSecret(&virtualSecret)
if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -76,6 +76,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
@@ -113,6 +114,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
@@ -144,6 +146,7 @@ var SecretTests = func() {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
Expect(err).NotTo(HaveOccurred())
return hostSecret.Labels
}).
WithPolling(time.Millisecond * 300).
@@ -170,6 +173,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() error {
@@ -190,6 +194,7 @@ var SecretTests = func() {
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
@@ -219,11 +224,13 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
var hostSecret v1.Secret
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).


@@ -76,7 +76,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
if err := controllerutil.SetOwnerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}


@@ -84,6 +84,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
@@ -132,6 +133,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
@@ -163,6 +165,7 @@ var ServiceTests = func() {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
Expect(err).NotTo(HaveOccurred())
return hostService.Spec.Ports[0].Name
}).
WithPolling(time.Millisecond * 300).
@@ -196,6 +199,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() error {
@@ -218,6 +222,7 @@ var ServiceTests = func() {
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).
@@ -255,11 +260,13 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
var hostService v1.Service
hostServiceName := translateName(cluster, service.Namespace, service.Name)
Eventually(func() bool {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
return apierrors.IsNotFound(err)
}).
WithPolling(time.Millisecond * 300).


@@ -133,6 +133,7 @@ var _ = Describe("Kubelet Controller", func() {
BeforeEach(func() {
var err error
ctx, cancel = context.WithCancel(context.Background())
hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{
@@ -151,12 +152,14 @@ var _ = Describe("Kubelet Controller", func() {
go func() {
defer GinkgoRecover()
err := hostManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
}()
go func() {
defer GinkgoRecover()
err := virtManager.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
}()


@@ -66,16 +66,12 @@ func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient
}
}
// register webhook with the manager
return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete()
return ctrl.NewWebhookManagedBy(mgr, &v1.Pod{}).WithDefaulter(&handler).Complete()
}
func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error {
pod, ok := obj.(*v1.Pod)
if !ok {
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
func (w *webhookHandler) Default(ctx context.Context, pod *v1.Pod) error {
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)


@@ -265,14 +265,25 @@ func (k *kubelet) start(ctx context.Context) {
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP, cfg.AgentHostname)
if err != nil {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)
err = provider.ConfigureNode(
k.logger,
pc.Node,
cfg.AgentHostname,
k.port,
k.agentIP,
k.hostMgr,
utilProvider.VirtualClient,
k.virtualCluster,
cfg.Version,
cfg.MirrorHostNodes,
)
return utilProvider, &provider.Node{}, nil
return utilProvider, &provider.Node{}, err
}
}
@@ -294,6 +305,9 @@ func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP strin
c.TLSConfig = tlsConfig
c.NodeSpec.Labels["kubernetes.io/role"] = "worker"
c.NodeSpec.Labels["node-role.kubernetes.io/worker"] = "true"
return nil
}
}
@@ -316,8 +330,10 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Error(err, "decoded bootstrap")
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -377,7 +393,9 @@ func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, age
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())


@@ -38,6 +38,7 @@ func main() {
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrlruntimelog.SetLogger(logger)
return nil
},
RunE: run,


@@ -2,26 +2,26 @@ package provider
import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) error {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Error(err, "error getting host node for mirroring", err)
var hostNode corev1.Node
if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
logger.Error(err, "error getting host node for mirroring", "node", node.Name)
return err
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -50,17 +50,10 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
// configure versions
node.Status.NodeInfo.KubeletVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error(err, "error updating node capacity")
}
}
}()
startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
}
return nil
}
// nodeConditions returns the basic conditions which mark the node as ready
@@ -108,73 +101,3 @@ func nodeConditions() []corev1.NodeCondition {
},
}
}
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
// If the nodeLabels are specified only the matching nodes will be considered.
func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
if err != nil {
return err
}
var virtualNode corev1.Node
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
return err
}
virtualNode.Status.Capacity = capacity
virtualNode.Status.Allocatable = allocatable
return virtualClient.Status().Update(ctx, &virtualNode)
}
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
// If some node labels are specified only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
listOpts := metav1.ListOptions{}
if nodeLabels != nil {
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
}
nodeList, err := coreClient.Nodes().List(ctx, listOpts)
if err != nil {
return nil, nil, err
}
// sum all
virtualCapacityResources := corev1.ResourceList{}
virtualAvailableResources := corev1.ResourceList{}
for _, node := range nodeList.Items {
// check if the node is Ready
for _, condition := range node.Status.Conditions {
if condition.Type != corev1.NodeReady {
continue
}
// if the node is not Ready then we can skip it
if condition.Status != corev1.ConditionTrue {
break
}
}
// add all the available metrics to the virtual node
for resourceName, resourceQuantity := range node.Status.Capacity {
virtualResource := virtualCapacityResources[resourceName]
(&virtualResource).Add(resourceQuantity)
virtualCapacityResources[resourceName] = virtualResource
}
for resourceName, resourceQuantity := range node.Status.Allocatable {
virtualResource := virtualAvailableResources[resourceName]
(&virtualResource).Add(resourceQuantity)
virtualAvailableResources[resourceName] = virtualResource
}
}
return virtualCapacityResources, virtualAvailableResources, nil
}


@@ -0,0 +1,243 @@
package provider
import (
"context"
"maps"
"sort"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
// UpdateNodeCapacityInterval is the interval at which the node capacity is updated.
UpdateNodeCapacityInterval = 10 * time.Second
)
// milliScaleResources is a set of resource names that are measured in milli-units (e.g., CPU).
// This is used to determine whether to use MilliValue() for calculations.
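// For instance (a hypothetical illustration): a 1500m CPU quota split across
// two nodes yields 750m each when computed with MilliValue(), whereas the
// whole-unit Value() would round 1.5 up to 2 before dividing.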
var milliScaleResources = map[corev1.ResourceName]struct{}{
corev1.ResourceCPU: {},
corev1.ResourceMemory: {},
corev1.ResourceStorage: {},
corev1.ResourceEphemeralStorage: {},
corev1.ResourceRequestsCPU: {},
corev1.ResourceRequestsMemory: {},
corev1.ResourceRequestsStorage: {},
corev1.ResourceRequestsEphemeralStorage: {},
corev1.ResourceLimitsCPU: {},
corev1.ResourceLimitsMemory: {},
corev1.ResourceLimitsEphemeralStorage: {},
}
// startNodeCapacityUpdater starts a goroutine that periodically updates the capacity
// of the virtual node based on host node capacity and any applied ResourceQuotas.
func startNodeCapacityUpdater(ctx context.Context, logger logr.Logger, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, virtualNodeName string) {
go func() {
ticker := time.NewTicker(UpdateNodeCapacityInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
updateNodeCapacity(ctx, logger, hostClient, virtualClient, virtualCluster, virtualNodeName)
case <-ctx.Done():
logger.Info("Stopping node capacity updates for node", "node", virtualNodeName)
return
}
}
}()
}
// updateNodeCapacity updates the virtual node capacity (and the allocatable field) from the
// allocatable resources of the matching host node, restricted by any ResourceQuotas in the cluster namespace.
func updateNodeCapacity(ctx context.Context, logger logr.Logger, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, virtualNodeName string) {
// by default we get the resources of the same Node where the kubelet is running
var node corev1.Node
if err := hostClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &node); err != nil {
logger.Error(err, "error getting virtual node for updating node capacity")
return
}
allocatable := node.Status.Allocatable.DeepCopy()
// we need to check if the virtual cluster resources are "limited" through ResourceQuotas
// If so we will use the minimum resources
var quotas corev1.ResourceQuotaList
if err := hostClient.List(ctx, &quotas, &client.ListOptions{Namespace: virtualCluster.Namespace}); err != nil {
logger.Error(err, "error getting namespace for updating node capacity")
}
if len(quotas.Items) > 0 {
resourceLists := []corev1.ResourceList{allocatable}
for _, q := range quotas.Items {
resourceLists = append(resourceLists, q.Status.Hard)
}
mergedResourceLists := mergeResourceLists(resourceLists...)
var virtualNodeList, hostNodeList corev1.NodeList
if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
logger.Error(err, "error listing virtual nodes for stable capacity distribution")
}
virtResourceMap := make(map[string]corev1.ResourceList)
for _, vNode := range virtualNodeList.Items {
virtResourceMap[vNode.Name] = corev1.ResourceList{}
}
if err := hostClient.List(ctx, &hostNodeList); err != nil {
logger.Error(err, "error listing host nodes for stable capacity distribution")
}
hostResourceMap := make(map[string]corev1.ResourceList)
for _, hNode := range hostNodeList.Items {
if _, ok := virtResourceMap[hNode.Name]; ok {
hostResourceMap[hNode.Name] = hNode.Status.Allocatable
}
}
m := distributeQuotas(hostResourceMap, virtResourceMap, mergedResourceLists)
allocatable = m[virtualNodeName]
}
var virtualNode corev1.Node
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
logger.Error(err, "error getting virtual node for updating node capacity")
return
}
virtualNode.Status.Capacity = allocatable
virtualNode.Status.Allocatable = allocatable
if err := virtualClient.Status().Update(ctx, &virtualNode); err != nil {
logger.Error(err, "error updating node capacity")
}
}
// mergeResourceLists takes multiple resource lists and returns a single list that represents
// the most restrictive set of resources. For each resource name, it selects the minimum
// quantity found across all the provided lists.
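// For example, merging {cpu: 4} with {cpu: 2, memory: 1Gi} yields
// {cpu: 2, memory: 1Gi}.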
func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList {
merged := corev1.ResourceList{}
for _, resourceList := range resourceLists {
for resName, qty := range resourceList {
existingQty, found := merged[resName]
// If it's the first time we see it OR the new one is smaller -> Update
if !found || qty.Cmp(existingQty) < 0 {
merged[resName] = qty.DeepCopy()
}
}
}
return merged
}
// distributeQuotas divides the total resource quotas among all active virtual nodes,
// capped by each node's actual host capacity. This ensures that each virtual node
// reports a fair share of the available resources without exceeding what its
// underlying host node can provide.
//
// For each resource type the algorithm uses a multi-pass redistribution loop:
// 1. Divide the remaining quota evenly among eligible nodes (sorted by name for
// determinism), assigning any integer remainder to the first nodes alphabetically.
// 2. Cap each node's share at its host allocatable capacity.
// 3. Remove nodes that have reached their host capacity.
// 4. If there is still unallocated quota (because some nodes were capped below their
// even share), repeat from step 1 with the remaining quota and remaining nodes.
//
// The loop terminates when the quota is fully distributed or no eligible nodes remain.
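//
// For example (mirroring the capping test in this change): with a CPU quota
// of 6 over node-1 (host allocatable 8) and node-2 (host allocatable 2), the
// first pass assigns 3 to each node; node-2 is capped at 2, leaving 1
// unallocated, and a second pass assigns that 1 to node-1 for a final split
// of 4 and 2.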
func distributeQuotas(hostResourceMap, virtResourceMap map[string]corev1.ResourceList, quotas corev1.ResourceList) map[string]corev1.ResourceList {
resourceMap := make(map[string]corev1.ResourceList, len(virtResourceMap))
maps.Copy(resourceMap, virtResourceMap)
// Distribute each resource type from the policy's hard quota
for resourceName, totalQuantity := range quotas {
_, useMilli := milliScaleResources[resourceName]
// eligible nodes for each distribution cycle
var eligibleNodes []string
hostCap := make(map[string]int64)
// Populate the host node capacity map and the initial eligible nodes
for vn := range virtResourceMap {
hostNodeResources := hostResourceMap[vn]
if hostNodeResources == nil {
continue
}
resourceQuantity, found := hostNodeResources[resourceName]
if !found {
// skip the node if the resource does not exist on the host node
continue
}
hostCap[vn] = resourceQuantity.Value()
if useMilli {
hostCap[vn] = resourceQuantity.MilliValue()
}
eligibleNodes = append(eligibleNodes, vn)
}
sort.Strings(eligibleNodes)
totalValue := totalQuantity.Value()
if useMilli {
totalValue = totalQuantity.MilliValue()
}
// Start of the distribution cycle: each cycle distributes the quota resource
// evenly between nodes; a node cannot exceed the corresponding host node capacity
for totalValue > 0 && len(eligibleNodes) > 0 {
nodeNum := int64(len(eligibleNodes))
quantityPerNode := totalValue / nodeNum
remainder := totalValue % nodeNum
remainingNodes := []string{}
for _, virtualNodeName := range eligibleNodes {
nodeQuantity := quantityPerNode
if remainder > 0 {
nodeQuantity++
remainder--
}
// We cap the quantity to the hostNode capacity
nodeQuantity = min(nodeQuantity, hostCap[virtualNodeName])
if nodeQuantity > 0 {
existing := resourceMap[virtualNodeName][resourceName]
if useMilli {
resourceMap[virtualNodeName][resourceName] = *resource.NewMilliQuantity(existing.MilliValue()+nodeQuantity, totalQuantity.Format)
} else {
resourceMap[virtualNodeName][resourceName] = *resource.NewQuantity(existing.Value()+nodeQuantity, totalQuantity.Format)
}
}
totalValue -= nodeQuantity
hostCap[virtualNodeName] -= nodeQuantity
if hostCap[virtualNodeName] > 0 {
remainingNodes = append(remainingNodes, virtualNodeName)
}
}
eligibleNodes = remainingNodes
}
}
return resourceMap
}


@@ -0,0 +1,296 @@
package provider
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/api/core/v1"
)
func Test_distributeQuotas(t *testing.T) {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
assert.NoError(t, err)
// Large allocatable so capping doesn't interfere with basic distribution tests.
largeAllocatable := corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100"),
corev1.ResourceMemory: resource.MustParse("100Gi"),
corev1.ResourcePods: resource.MustParse("1000"),
}
tests := []struct {
name string
virtResourceMap map[string]corev1.ResourceList
hostResourceMap map[string]corev1.ResourceList
quotas corev1.ResourceList
want map[string]corev1.ResourceList
}{
{
name: "no virtual nodes",
virtResourceMap: map[string]corev1.ResourceList{},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2"),
},
want: map[string]corev1.ResourceList{},
},
{
name: "no quotas",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": largeAllocatable,
"node-2": largeAllocatable,
},
quotas: corev1.ResourceList{},
want: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
},
{
name: "fewer virtual nodes than host nodes",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": largeAllocatable,
"node-2": largeAllocatable,
"node-3": largeAllocatable,
"node-4": largeAllocatable,
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2"),
corev1.ResourceMemory: resource.MustParse("4Gi"),
},
want: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("2Gi"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
{
name: "even distribution of cpu and memory",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": largeAllocatable,
"node-2": largeAllocatable,
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2"),
corev1.ResourceMemory: resource.MustParse("4Gi"),
},
want: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("2Gi"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
{
name: "uneven distribution with remainder",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
"node-3": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": largeAllocatable,
"node-2": largeAllocatable,
"node-3": largeAllocatable,
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2"),
},
want: map[string]corev1.ResourceList{
"node-1": {corev1.ResourceCPU: resource.MustParse("667m")},
"node-2": {corev1.ResourceCPU: resource.MustParse("667m")},
"node-3": {corev1.ResourceCPU: resource.MustParse("666m")},
},
},
{
name: "distribution of number resources",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
"node-3": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": largeAllocatable,
"node-2": largeAllocatable,
"node-3": largeAllocatable,
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2"),
corev1.ResourcePods: resource.MustParse("11"),
},
want: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("667m"),
corev1.ResourcePods: resource.MustParse("4"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("667m"),
corev1.ResourcePods: resource.MustParse("4"),
},
"node-3": {
corev1.ResourceCPU: resource.MustParse("666m"),
corev1.ResourcePods: resource.MustParse("3"),
},
},
},
{
name: "extended resource distributed only to nodes that have it",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
"node-3": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("100"),
"nvidia.com/gpu": resource.MustParse("2"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("100"),
},
"node-3": {
corev1.ResourceCPU: resource.MustParse("100"),
"nvidia.com/gpu": resource.MustParse("4"),
},
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
"nvidia.com/gpu": resource.MustParse("4"),
},
want: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("1"),
"nvidia.com/gpu": resource.MustParse("2"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("1"),
},
"node-3": {
corev1.ResourceCPU: resource.MustParse("1"),
"nvidia.com/gpu": resource.MustParse("2"),
},
},
},
{
name: "capping at host capacity with redistribution",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": {
corev1.ResourceCPU: resource.MustParse("8"),
},
"node-2": {
corev1.ResourceCPU: resource.MustParse("2"),
},
},
quotas: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("6"),
},
// Even split would be 3 each, but node-2 only has 2 CPU.
// node-2 gets capped at 2, the remaining 1 goes to node-1.
want: map[string]corev1.ResourceList{
"node-1": {corev1.ResourceCPU: resource.MustParse("4")},
"node-2": {corev1.ResourceCPU: resource.MustParse("2")},
},
},
{
name: "gpu capping with uneven host capacity",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": {
"nvidia.com/gpu": resource.MustParse("6"),
},
"node-2": {
"nvidia.com/gpu": resource.MustParse("1"),
},
},
quotas: corev1.ResourceList{
"nvidia.com/gpu": resource.MustParse("4"),
},
// Even split would be 2 each, but node-2 only has 1 GPU.
// node-2 gets capped at 1, the remaining 1 goes to node-1.
want: map[string]corev1.ResourceList{
"node-1": {"nvidia.com/gpu": resource.MustParse("3")},
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
},
},
{
name: "quota exceeds total host capacity",
virtResourceMap: map[string]corev1.ResourceList{
"node-1": {},
"node-2": {},
"node-3": {},
},
hostResourceMap: map[string]corev1.ResourceList{
"node-1": {
"nvidia.com/gpu": resource.MustParse("2"),
},
"node-2": {
"nvidia.com/gpu": resource.MustParse("1"),
},
"node-3": {
"nvidia.com/gpu": resource.MustParse("1"),
},
},
quotas: corev1.ResourceList{
"nvidia.com/gpu": resource.MustParse("10"),
},
// Total host capacity is 4, quota is 10. Each node gets its full capacity.
want: map[string]corev1.ResourceList{
"node-1": {"nvidia.com/gpu": resource.MustParse("2")},
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
"node-3": {"nvidia.com/gpu": resource.MustParse("1")},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := distributeQuotas(tt.hostResourceMap, tt.virtResourceMap, tt.quotas)
assert.Equal(t, len(tt.want), len(got), "Number of nodes in result should match")
for nodeName, expectedResources := range tt.want {
actualResources, ok := got[nodeName]
assert.True(t, ok, "Node %s not found in result", nodeName)
assert.Equal(t, len(expectedResources), len(actualResources), "Number of resources for node %s should match", nodeName)
for resName, expectedQty := range expectedResources {
actualQty, ok := actualResources[resName]
assert.True(t, ok, "Resource %s not found for node %s", resName, nodeName)
assert.True(t, expectedQty.Equal(actualQty), "Resource %s for node %s did not match. want: %s, got: %s", resName, nodeName, expectedQty.String(), actualQty.String())
}
}
})
}
}


@@ -35,7 +35,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
v1alpha1stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
@@ -60,12 +60,13 @@ type Provider struct {
ClusterName string
serverIP string
dnsIP string
agentHostname string
logger logr.Logger
}
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP, agentHostname string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
@@ -88,6 +89,7 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
logger: logger.WithValues("cluster", name),
serverIP: serverIP,
dnsIP: dnsIP,
agentHostname: agentHostname,
}
return &p, nil
@@ -225,47 +227,35 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
}
// GetStatsSummary gets the stats for the node, including running pods
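// With the single-node lookup below, this issues a GET against
// /api/v1/nodes/<agentHostname>/proxy/stats/summary on the host cluster.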
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
func (p *Provider) GetStatsSummary(ctx context.Context) (*v1alpha1stats.Summary, error) {
p.logger.V(1).Info("GetStatsSummary")
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
node, err := p.CoreClient.Nodes().Get(ctx, p.agentHostname, metav1.GetOptions{})
if err != nil {
p.logger.Error(err, "Unable to get nodes of cluster")
return nil, err
}
// fetch the stats from all the nodes
var (
nodeStats stats.NodeStats
allPodsStats []stats.PodStats
)
for _, n := range nodeList.Items {
res, err := p.CoreClient.RESTClient().
Get().
Resource("nodes").
Name(n.Name).
SubResource("proxy").
Suffix("stats/summary").
DoRaw(ctx)
if err != nil {
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", n.Name)
return nil, err
}
stats := &stats.Summary{}
if err := json.Unmarshal(res, stats); err != nil {
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", n.Name)
return nil, err
}
// TODO: we should probably calculate somehow the node stats from the different nodes of the host
// or reflect different nodes from the virtual kubelet.
// For the moment let's just pick one random node stats.
nodeStats = stats.Node
allPodsStats = append(allPodsStats, stats.Pods...)
res, err := p.CoreClient.RESTClient().
Get().
Resource("nodes").
Name(node.Name).
SubResource("proxy").
Suffix("stats/summary").
DoRaw(ctx)
if err != nil {
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", node.Name)
return nil, err
}
var statsSummary v1alpha1stats.Summary
if err := json.Unmarshal(res, &statsSummary); err != nil {
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", node.Name)
return nil, err
}
nodeStats := statsSummary.Node
pods, err := p.GetPods(ctx)
if err != nil {
p.logger.Error(err, "Error getting pods from cluster for stats")
@@ -279,12 +269,12 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
podsNameMap[hostPodName] = pod
}
filteredStats := &stats.Summary{
filteredStats := &v1alpha1stats.Summary{
Node: nodeStats,
Pods: make([]stats.PodStats, 0),
Pods: make([]v1alpha1stats.PodStats, 0),
}
for _, podStat := range allPodsStats {
for _, podStat := range statsSummary.Pods {
// skip pods that are not in the cluster namespace
if podStat.PodRef.Namespace != p.ClusterNamespace {
continue
@@ -292,7 +282,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
// rewrite the PodReference to match the data of the virtual cluster
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
podStat.PodRef = stats.PodReference{
podStat.PodRef = v1alpha1stats.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
@@ -408,31 +398,48 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
logger = logger.WithValues("pod", hostPod.Name)
// the node was scheduled on the virtual kubelet, but leaving it this way will make it pending indefinitely
hostPod.Spec.NodeName = ""
// Schedule the host pod on the same host node as the virtual kubelet
hostPod.Spec.NodeName = p.agentHostname
// The pod's own nodeSelector is ignored.
// The final selector is determined by the cluster spec, but overridden by a policy if present.
hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
hostPod.Spec.NodeSelector = cluster.Status.Policy.NodeSelector
}
// setting the hostname for the pod if it's not set
if virtualPod.Spec.Hostname == "" {
hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
}
// if the priorityClass for the virtual cluster is set then override the provided value
// When a PriorityClass is set we will use the translated one in the HostCluster.
// If the Cluster or a Policy defines a PriorityClass of the host we are going to use that one.
// Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
if hostPod.Spec.PriorityClassName != "" {
tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
hostPod.Spec.PriorityClassName = tPriorityClassName
//
// TODO: we probably need to define a custom "intermediate" k3k-system-* priority
if strings.HasPrefix(virtualPod.Spec.PriorityClassName, "system-") {
hostPod.Spec.PriorityClassName = virtualPod.Spec.PriorityClassName
} else {
enforcedPriorityClassName := cluster.Spec.PriorityClass
if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
enforcedPriorityClassName = *cluster.Status.Policy.PriorityClass
}
if cluster.Spec.PriorityClass != "" {
hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
if enforcedPriorityClassName != "" {
hostPod.Spec.PriorityClassName = enforcedPriorityClassName
} else if virtualPod.Spec.PriorityClassName != "" {
hostPod.Spec.PriorityClassName = p.Translator.TranslateName("", virtualPod.Spec.PriorityClassName)
hostPod.Spec.Priority = nil
}
}
// if the priority class is set we need to remove the priority
if hostPod.Spec.PriorityClassName != "" {
hostPod.Spec.Priority = nil
}
p.configurePodEnvs(hostPod, &virtualPod)
// fieldpath annotations


@@ -57,6 +57,7 @@ func main() {
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = zapr.NewLogger(log.New(debug, logFormat))
},
RunE: run,


@@ -1,4 +1,4 @@
FROM alpine
FROM registry.suse.com/bci/bci-base:15.7
ARG BIN_K3K=bin/k3k
ARG BIN_K3KCLI=bin/k3kcli


@@ -1,5 +1,5 @@
# TODO: switch this to BCI-micro or scratch. Left as base right now so that debugging is a bit easier
FROM registry.suse.com/bci/bci-base:15.6
FROM registry.suse.com/bci/bci-base:15.7
ARG BIN_K3K_KUBELET=bin/k3k-kubelet


@@ -1,7 +1,9 @@
package v1beta1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -123,7 +125,7 @@ type ClusterSpec struct {
// The Secret must have a "token" field in its data.
//
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef,omitempty"`
TokenSecretRef *corev1.SecretReference `json:"tokenSecretRef,omitempty"`
// TLSSANs specifies subject alternative names for the K3s server certificate.
//
@@ -145,12 +147,12 @@ type ClusterSpec struct {
// ServerEnvs specifies list of environment variables to set in the server pod.
//
// +optional
ServerEnvs []v1.EnvVar `json:"serverEnvs,omitempty"`
ServerEnvs []corev1.EnvVar `json:"serverEnvs,omitempty"`
// AgentEnvs specifies list of environment variables to set in the agent pod.
//
// +optional
AgentEnvs []v1.EnvVar `json:"agentEnvs,omitempty"`
AgentEnvs []corev1.EnvVar `json:"agentEnvs,omitempty"`
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
//
@@ -160,12 +162,12 @@ type ClusterSpec struct {
// ServerLimit specifies resource limits for server nodes.
//
// +optional
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
ServerLimit corev1.ResourceList `json:"serverLimit,omitempty"`
// WorkerLimit specifies resource limits for agent nodes.
//
// +optional
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
WorkerLimit corev1.ResourceList `json:"workerLimit,omitempty"`
// MirrorHostNodes controls whether node objects from the host cluster
// are mirrored into the virtual cluster.
@@ -183,6 +185,36 @@ type ClusterSpec struct {
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
// SecretMounts specifies a list of secrets to mount into server and agent pods.
// Each entry defines a secret and its mount path within the pods.
//
// +optional
SecretMounts []SecretMount `json:"secretMounts,omitempty"`
}
// SecretMount defines a secret to be mounted into server or agent pods,
// allowing for custom configurations, certificates, or other sensitive data.
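//
// A hypothetical manifest snippet (illustrative only):
//
//	secretMounts:
//	  - secretName: registry-certs
//	    mountPath: /etc/ssl/certs/registry.crt
//	    subPath: registry.crt
//	    role: agent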
type SecretMount struct {
// Embeds SecretName, Items, DefaultMode, and Optional
corev1.SecretVolumeSource `json:",inline"`
// MountPath is the path within server and agent pods where the
// secret contents will be mounted.
//
// +optional
MountPath string `json:"mountPath,omitempty"`
// SubPath is an optional path within the secret to mount instead of the root.
// When specified, only the specified key from the secret will be mounted as a file
// at MountPath, keeping the parent directory writable.
//
// +optional
SubPath string `json:"subPath,omitempty"`
// Role is the type of the k3k pod that will be used to mount the secret.
// This can be 'server', 'agent', or 'all' (for both).
//
// +optional
// +kubebuilder:validation:Enum=server;agent;all
Role string `json:"role,omitempty"`
}
// SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
@@ -217,9 +249,14 @@ type SyncConfig struct {
// +kubebuilder:default={"enabled": false}
// +optional
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
// StorageClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
// +optional
StorageClasses StorageClassSyncConfig `json:"storageClasses"`
}
// SecretSyncConfig specifies the sync options for services.
// SecretSyncConfig specifies the sync options for Secrets.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -234,7 +271,7 @@ type SecretSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ServiceSyncConfig specifies the sync options for services.
// ServiceSyncConfig specifies the sync options for Services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -249,7 +286,7 @@ type ServiceSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ConfigMapSyncConfig specifies the sync options for services.
// ConfigMapSyncConfig specifies the sync options for ConfigMaps.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -264,7 +301,7 @@ type ConfigMapSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// IngressSyncConfig specifies the sync options for services.
// IngressSyncConfig specifies the sync options for Ingresses.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -279,7 +316,7 @@ type IngressSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// PersistentVolumeClaimSyncConfig specifies the sync options for services.
// PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -294,7 +331,7 @@ type PersistentVolumeClaimSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// PriorityClassSyncConfig specifies the sync options for services.
// PriorityClassSyncConfig specifies the sync options for PriorityClasses.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -309,6 +346,21 @@ type PriorityClassSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// StorageClassSyncConfig specifies the sync options for StorageClasses.
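//
// A hypothetical example (illustrative only), syncing only labeled classes:
//
//	storageClasses:
//	  enabled: true
//	  selector:
//	    k3k.io/sync: "true"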
type StorageClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies set of labels of the resources that will be synced, if empty
// then all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ClusterMode is the possible provisioning mode of a Cluster.
//
// +kubebuilder:validation:Enum=shared;virtual
@@ -362,8 +414,9 @@ type PersistenceConfig struct {
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="2G"
// +kubebuilder:validation:XValidation:message="storageRequestSize is immutable",rule="self == oldSelf"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
StorageRequestSize *resource.Quantity `json:"storageRequestSize,omitempty"`
}
// ExposeConfig specifies options for exposing the API server.
@@ -467,10 +520,9 @@ type CredentialSources struct {
// CredentialSource defines where to get a credential from.
// It can represent either a TLS key pair or a single private key.
type CredentialSource struct {
// SecretName specifies the name of an existing secret to use.
// The controller expects specific keys inside based on the credential type:
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
// - For ServiceAccountTokenKey: 'tls.key'.
// The secret must contain specific keys based on the credential type:
// - For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
// - For the ServiceAccountToken signing key: `tls.key`.
SecretName string `json:"secretName"`
}
@@ -506,6 +558,12 @@ type ClusterStatus struct {
// +optional
PolicyName string `json:"policyName,omitempty"`
// policy represents the status of the policy applied to this cluster.
// This field is set by the VirtualClusterPolicy controller.
//
// +optional
Policy *AppliedPolicy `json:"policy,omitempty"`
// KubeletPort specifies the port used by k3k-kubelet in shared mode.
//
// +optional
@@ -529,6 +587,30 @@ type ClusterStatus struct {
Phase ClusterPhase `json:"phase,omitempty"`
}
// AppliedPolicy defines the observed state of an applied policy.
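//
// A hypothetical status snippet (illustrative only):
//
//	status:
//	  policyName: default-policy
//	  policy:
//	    name: default-policy
//	    priorityClass: low-priority
//	    nodeSelector:
//	      pool: shared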
type AppliedPolicy struct {
// name is the name of the VirtualClusterPolicy currently applied to this cluster.
//
// +kubebuilder:validation:MinLength:=1
// +required
Name string `json:"name,omitempty"`
// priorityClass is the priority class enforced by the active VirtualClusterPolicy.
//
// +optional
PriorityClass *string `json:"priorityClass,omitempty"`
// nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// sync is the SyncConfig enforced by the active VirtualClusterPolicy.
//
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
type ClusterPhase string
@@ -582,13 +664,13 @@ type VirtualClusterPolicySpec struct {
// Quota specifies the resource limits for clusters within a clusterpolicy.
//
// +optional
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
Quota *corev1.ResourceQuotaSpec `json:"quota,omitempty"`
// Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
// to set defaults and constraints (min/max)
//
// +optional
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
Limit *corev1.LimitRangeSpec `json:"limit,omitempty"`
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
//


@@ -25,6 +25,38 @@ func (in *Addon) DeepCopy() *Addon {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
*out = *in
if in.PriorityClass != nil {
in, out := &in.PriorityClass, &out.PriorityClass
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
func (in *AppliedPolicy) DeepCopy() *AppliedPolicy {
if in == nil {
return nil
}
out := new(AppliedPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in
@@ -173,6 +205,13 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
if in.SecretMounts != nil {
in, out := &in.SecretMounts, &out.SecretMounts
*out = make([]SecretMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -193,6 +232,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(AppliedPolicy)
(*in).DeepCopyInto(*out)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
@@ -418,6 +462,11 @@ func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
*out = new(string)
**out = **in
}
if in.StorageRequestSize != nil {
in, out := &in.StorageRequestSize, &out.StorageRequestSize
x := (*in).DeepCopy()
*out = &x
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
@@ -474,6 +523,22 @@ func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretMount) DeepCopyInto(out *SecretMount) {
*out = *in
in.SecretVolumeSource.DeepCopyInto(&out.SecretVolumeSource)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretMount.
func (in *SecretMount) DeepCopy() *SecretMount {
if in == nil {
return nil
}
out := new(SecretMount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
*out = *in
@@ -518,6 +583,28 @@ func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClassSyncConfig) DeepCopyInto(out *StorageClassSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSyncConfig.
func (in *StorageClassSyncConfig) DeepCopy() *StorageClassSyncConfig {
if in == nil {
return nil
}
out := new(StorageClassSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
*out = *in
@@ -527,6 +614,7 @@ func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
in.Ingresses.DeepCopyInto(&out.Ingresses)
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
in.StorageClasses.DeepCopyInto(&out.StorageClasses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"os"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"

View File

@@ -386,6 +386,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
Resources: []string{"events"},
Verbs: []string{"create"},
},
{
APIGroups: []string{""},
Resources: []string{"resourcequotas"},
Verbs: []string{"get", "watch", "list"},
},
{
APIGroups: []string{"networking.k8s.io"},
Resources: []string{"ingresses"},

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -13,6 +13,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/mounts"
)
const (
@@ -98,6 +99,15 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
"mode": "virtual",
},
}
podSpec := v.podSpec(image, name)
if len(v.cluster.Spec.SecretMounts) > 0 {
vols, volMounts := mounts.BuildSecretsMountsVolumes(v.cluster.Spec.SecretMounts, "agent")
podSpec.Volumes = append(podSpec.Volumes, vols...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volMounts...)
}
deployment := &apps.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -116,7 +126,7 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
ObjectMeta: metav1.ObjectMeta{
Labels: selector.MatchLabels,
},
Spec: v.podSpec(image, name, v.cluster.Spec.AgentArgs, &selector),
Spec: podSpec,
},
},
}
@@ -124,12 +134,14 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
return v.ensureObject(ctx, deployment)
}
func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec {
func (v *VirtualAgent) podSpec(image, name string) v1.PodSpec {
var limit v1.ResourceList
args := v.cluster.Spec.AgentArgs
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
NodeSelector: v.cluster.Spec.NodeSelector,
Volumes: []v1.Volume{
{
Name: "config",

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
func Test_virtualAgentData(t *testing.T) {

View File

@@ -11,6 +11,7 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
@@ -28,6 +29,7 @@ import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -43,16 +45,22 @@ import (
)
const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
ClusterInvalidName = "system"
SyncEnabledLabelKey = "k3k.io/sync-enabled"
SyncSourceLabelKey = "k3k.io/sync-source"
SyncSourceHostLabel = "host"
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
memberRemovalTimeout = time.Minute * 1
storageClassEnabledIndexField = "spec.sync.storageClasses.enabled"
storageClassStatusEnabledIndexField = "status.policy.sync.storageClasses.enabled"
)
var (
@@ -116,15 +124,82 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
},
}
// index the 'spec.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Spec.Sync != nil && vc.Spec.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
// index the 'status.policy.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassStatusEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Status.Policy != nil && vc.Status.Policy.Sync != nil && vc.Status.Policy.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Watches(&storagev1.StorageClass{},
handler.EnqueueRequestsFromMapFunc(reconciler.mapStorageClassToCluster),
).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (r *ClusterReconciler) mapStorageClassToCluster(ctx context.Context, obj client.Object) []reconcile.Request {
log := ctrl.LoggerFrom(ctx)
if _, ok := obj.(*storagev1.StorageClass); !ok {
return nil
}
// Merge and deduplicate the clusters matched by either index
allClusters := make(map[types.NamespacedName]struct{})
var specClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &specClusterList, client.MatchingFields{storageClassEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with spec sync enabled for storageclass sync")
} else {
for _, cluster := range specClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
var statusClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &statusClusterList, client.MatchingFields{storageClassStatusEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with status sync enabled for storageclass sync")
} else {
for _, cluster := range statusClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
requests := make([]reconcile.Request, 0, len(allClusters))
for key := range allClusters {
requests = append(requests, reconcile.Request{NamespacedName: key})
}
return requests
}
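To illustrate what feeds these indexes (sketch only; the selector label is invented): a Cluster whose spec opts in to StorageClass sync is returned by the storageClassEnabledIndexField lookup above, so every host StorageClass event enqueues it for reconciliation.

package example

import "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

// exampleSyncConfig builds a Spec.Sync value for which the
// 'spec.sync.storageClasses.enabled' index yields "true". An empty
// Selector matches every host StorageClass.
func exampleSyncConfig() *v1beta1.SyncConfig {
    return &v1beta1.SyncConfig{
        StorageClasses: v1beta1.StorageClassSyncConfig{
            Enabled:  true,
            Selector: map[string]string{"tier": "fast"}, // hypothetical label
        },
    }
}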
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
return handler.Funcs{
// We don't need to update for create or delete events
@@ -351,11 +426,22 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
return err
}
if err := c.bindClusterRoles(ctx, cluster); err != nil {
return err
}
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
return err
}
return c.bindClusterRoles(ctx, cluster)
// Important: if you need to call the Server API of the Virtual Cluster,
// this needs to be done AFTER the kubeconfig has been generated
if err := c.ensureStorageClasses(ctx, cluster); err != nil {
return err
}
return nil
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
@@ -621,6 +707,120 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.
return nil
}
func (c *ClusterReconciler) ensureStorageClasses(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Ensuring cluster StorageClasses")
virtualClient, err := newVirtualClient(ctx, c.Client, cluster.Name, cluster.Namespace)
if err != nil {
return fmt.Errorf("failed creating virtual client: %w", err)
}
appliedSync := cluster.Spec.Sync.DeepCopy()
// If a policy is applied to the virtual cluster, we use its SyncConfig instead, if available
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
appliedSync = cluster.Status.Policy.Sync
}
// If storageclass sync is disabled, clean up any managed storage classes.
if appliedSync == nil || !appliedSync.StorageClasses.Enabled {
err := virtualClient.DeleteAllOf(ctx, &storagev1.StorageClass{}, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel})
return client.IgnoreNotFound(err)
}
var hostStorageClasses storagev1.StorageClassList
if err := c.Client.List(ctx, &hostStorageClasses); err != nil {
return fmt.Errorf("failed listing host storageclasses: %w", err)
}
// filter out the StorageClasses with sync disabled, and the ones not matching the selector
filteredHostStorageClasses := make(map[string]storagev1.StorageClass)
for _, sc := range hostStorageClasses.Items {
syncEnabled, found := sc.Labels[SyncEnabledLabelKey]
// if sync is disabled -> continue
if found && syncEnabled != "true" {
log.V(1).Info("sync is disabled", "sc-name", sc.Name)
continue
}
// if selector doesn't match -> continue
// an empty selector matches everything
selector := labels.SelectorFromSet(appliedSync.StorageClasses.Selector)
if !selector.Matches(labels.Set(sc.Labels)) {
log.V(1).Info("selector not matching", "sc-name", sc.Name)
continue
}
log.V(1).Info("keeping storageclass", "sc-name", sc.Name)
filteredHostStorageClasses[sc.Name] = sc
}
var virtStorageClasses storagev1.StorageClassList
if err = virtualClient.List(ctx, &virtStorageClasses, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel}); err != nil {
return fmt.Errorf("failed listing virtual storageclasses: %w", err)
}
// delete virtual StorageClasses no longer present in the filtered host set
for _, sc := range virtStorageClasses.Items {
if _, found := filteredHostStorageClasses[sc.Name]; !found {
log.V(1).Info("deleting storageclass", "sc-name", sc.Name)
if errDelete := virtualClient.Delete(ctx, &sc); errDelete != nil {
log.Error(errDelete, "failed to delete virtual storageclass", "name", sc.Name)
err = errors.Join(err, errDelete)
}
}
}
for _, hostSc := range filteredHostStorageClasses {
log.V(1).Info("updating storageclass", "sc-name", hostSc.Name)
virtualSc := hostSc.DeepCopy()
virtualSc.ObjectMeta = metav1.ObjectMeta{
Name: hostSc.Name,
Labels: hostSc.Labels,
Annotations: hostSc.Annotations,
}
_, errCreateOrUpdate := controllerutil.CreateOrUpdate(ctx, virtualClient, virtualSc, func() error {
virtualSc.Annotations = hostSc.Annotations
virtualSc.Labels = hostSc.Labels
if len(virtualSc.Labels) == 0 {
virtualSc.Labels = make(map[string]string)
}
virtualSc.Labels[SyncSourceLabelKey] = SyncSourceHostLabel
virtualSc.Provisioner = hostSc.Provisioner
virtualSc.Parameters = hostSc.Parameters
virtualSc.ReclaimPolicy = hostSc.ReclaimPolicy
virtualSc.MountOptions = hostSc.MountOptions
virtualSc.AllowVolumeExpansion = hostSc.AllowVolumeExpansion
virtualSc.VolumeBindingMode = hostSc.VolumeBindingMode
virtualSc.AllowedTopologies = hostSc.AllowedTopologies
return nil
})
if errCreateOrUpdate != nil {
log.Error(errCreateOrUpdate, "failed to create or update virtual storageclass", "name", virtualSc.Name)
err = errors.Join(err, errCreateOrUpdate)
}
}
if err != nil {
return fmt.Errorf("failed to sync storageclasses: %w", err)
}
return nil
}
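The k3k.io/sync-enabled label gives hosts an opt-out: any value other than "true" excludes the StorageClass from the filter above, and the deletion loop removes copies already synced into the virtual cluster. A hedged sketch (client and name are placeholders):

package example

import (
    "context"

    storagev1 "k8s.io/api/storage/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// excludeFromSync labels a host StorageClass so ensureStorageClasses skips it
// on the next reconcile.
func excludeFromSync(ctx context.Context, c client.Client, name string) error {
    var sc storagev1.StorageClass
    if err := c.Get(ctx, client.ObjectKey{Name: name}, &sc); err != nil {
        return err
    }
    if sc.Labels == nil {
        sc.Labels = map[string]string{}
    }
    sc.Labels["k3k.io/sync-enabled"] = "false" // SyncEnabledLabelKey
    return c.Update(ctx, &sc)
}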
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
@@ -743,11 +943,6 @@ func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.Vi
}
}
// validate sync policy
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
}
return nil
}

View File

@@ -16,6 +16,7 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
@@ -40,6 +41,7 @@ var (
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
ErrorIfCRDPathMissing: true,
@@ -60,7 +62,7 @@ var _ = BeforeSuite(func() {
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme, Metrics: metricsserver.Options{BindAddress: "0"}})
Expect(err).NotTo(HaveOccurred())
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
@@ -81,6 +83,7 @@ var _ = BeforeSuite(func() {
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()
@@ -90,6 +93,7 @@ var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -2,11 +2,14 @@ package cluster_test
import (
"context"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,6 +35,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
createdNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
@@ -66,7 +70,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(cluster.Spec.Sync.PriorityClasses.Enabled).To(BeFalse())
Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
Expect(cluster.Spec.Persistence.StorageRequestSize.Equal(resource.MustParse("2G"))).To(BeTrue())
Expect(cluster.Status.Phase).To(Equal(v1beta1.ClusterUnknown))
@@ -76,6 +80,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Eventually(func() string {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Status.HostVersion
}).
WithTimeout(time.Second * 30).
@@ -127,6 +132,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
@@ -162,6 +168,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
@@ -210,6 +217,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
return service.Spec.Type
}).
WithTimeout(time.Second * 30).
@@ -294,6 +302,175 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(err).To(HaveOccurred())
})
})
When("adding addons to the cluster", func() {
It("will create a statefulset with the correct addon volumes and volume mounts", func() {
// Create the addon secret first
addonSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-addon",
Namespace: namespace,
},
Data: map[string][]byte{
"manifest.yaml": []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-cm\n"),
},
}
Expect(k8sClient.Create(ctx, addonSecret)).To(Succeed())
// Create the cluster with an addon referencing the secret
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Addons: []v1beta1.Addon{
{
SecretRef: "test-addon",
},
},
},
}
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
// Wait for the statefulset to be created and verify volumes/mounts
var statefulSet appsv1.StatefulSet
statefulSetName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name, "server")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{
Name: statefulSetName,
Namespace: cluster.Namespace,
}, &statefulSet)
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
// Verify the addon volume exists
var addonVolume *corev1.Volume
for i := range statefulSet.Spec.Template.Spec.Volumes {
v := &statefulSet.Spec.Template.Spec.Volumes[i]
if v.Name == "addon-test-addon" {
addonVolume = v
break
}
}
Expect(addonVolume).NotTo(BeNil(), "addon volume should exist")
Expect(addonVolume.VolumeSource.Secret).NotTo(BeNil())
Expect(addonVolume.VolumeSource.Secret.SecretName).To(Equal("test-addon"))
// Verify the addon volume mount exists in the first container
containers := statefulSet.Spec.Template.Spec.Containers
Expect(containers).NotTo(BeEmpty())
var addonMount *corev1.VolumeMount
for i := range containers[0].VolumeMounts {
m := &containers[0].VolumeMounts[i]
if m.Name == "addon-test-addon" {
addonMount = m
break
}
}
Expect(addonMount).NotTo(BeNil(), "addon volume mount should exist")
Expect(addonMount.MountPath).To(Equal("/var/lib/rancher/k3s/server/manifests/test-addon"))
Expect(addonMount.ReadOnly).To(BeTrue())
})
It("will create volumes for multiple addons in the correct order", func() {
// Create multiple addon secrets
addonSecret1 := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "addon-one",
Namespace: namespace,
},
Data: map[string][]byte{
"manifest.yaml": []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm-one\n"),
},
}
addonSecret2 := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "addon-two",
Namespace: namespace,
},
Data: map[string][]byte{
"manifest.yaml": []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm-two\n"),
},
}
Expect(k8sClient.Create(ctx, addonSecret1)).To(Succeed())
Expect(k8sClient.Create(ctx, addonSecret2)).To(Succeed())
// Create the cluster with multiple addons in specific order
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1beta1.ClusterSpec{
Addons: []v1beta1.Addon{
{SecretRef: "addon-one"},
{SecretRef: "addon-two"},
},
},
}
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
// Wait for the statefulset to be created
var statefulSet appsv1.StatefulSet
statefulSetName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name, "server")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{
Name: statefulSetName,
Namespace: cluster.Namespace,
}, &statefulSet)
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
// Verify both addon volumes exist and are in the correct order
volumes := statefulSet.Spec.Template.Spec.Volumes
// Extract only addon volumes (those starting with "addon-")
var addonVolumes []corev1.Volume
for _, v := range volumes {
if strings.HasPrefix(v.Name, "addon-") {
addonVolumes = append(addonVolumes, v)
}
}
Expect(addonVolumes).To(HaveLen(2))
Expect(addonVolumes[0].Name).To(Equal("addon-addon-one"))
Expect(addonVolumes[1].Name).To(Equal("addon-addon-two"))
// Verify both addon volume mounts exist and are in the correct order
containers := statefulSet.Spec.Template.Spec.Containers
Expect(containers).NotTo(BeEmpty())
// Extract only addon mounts (those starting with "addon-")
var addonMounts []corev1.VolumeMount
for _, m := range containers[0].VolumeMounts {
if strings.HasPrefix(m.Name, "addon-") {
addonMounts = append(addonMounts, m)
}
}
Expect(addonMounts).To(HaveLen(2))
Expect(addonMounts[0].Name).To(Equal("addon-addon-one"))
Expect(addonMounts[0].MountPath).To(Equal("/var/lib/rancher/k3s/server/manifests/addon-one"))
Expect(addonMounts[1].Name).To(Equal("addon-addon-two"))
Expect(addonMounts[1].MountPath).To(Equal("/var/lib/rancher/k3s/server/manifests/addon-two"))
})
})
})
})
})

View File

@@ -0,0 +1,60 @@
package mounts
import (
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
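// BuildSecretsMountsVolumes converts the cluster's SecretMounts into pod
// volumes and volume mounts for the given role ("server" or "agent").
// Entries missing a SecretName or MountPath are skipped; an empty Role or
// "all" matches every role.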
func BuildSecretsMountsVolumes(secretMounts []v1beta1.SecretMount, role string) ([]v1.Volume, []v1.VolumeMount) {
var (
vols []v1.Volume
volMounts []v1.VolumeMount
)
for _, secretMount := range secretMounts {
if secretMount.SecretName == "" || secretMount.MountPath == "" {
continue
}
if secretMount.Role == role || secretMount.Role == "" || secretMount.Role == "all" {
vol, volMount := buildSecretMountVolume(secretMount)
vols = append(vols, vol)
volMounts = append(volMounts, volMount)
}
}
return vols, volMounts
}
func buildSecretMountVolume(secretMount v1beta1.SecretMount) (v1.Volume, v1.VolumeMount) {
projectedVolSources := []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: secretMount.SecretName,
},
Items: secretMount.Items,
Optional: secretMount.Optional,
},
},
}
vol := v1.Volume{
Name: secretMount.SecretName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: projectedVolSources,
},
},
}
volMount := v1.VolumeMount{
Name: secretMount.SecretName,
MountPath: secretMount.MountPath,
SubPath: secretMount.SubPath,
}
return vol, volMount
}
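A short usage sketch (the secret name is an example) mirroring the registries.yaml case exercised in the tests below:

package example

import (
    v1 "k8s.io/api/core/v1"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
    "github.com/rancher/k3k/pkg/controller/cluster/mounts"
)

// exampleSecretMounts mounts a registries.yaml Secret into server pods;
// Role "all" would also match agents.
func exampleSecretMounts() ([]v1.Volume, []v1.VolumeMount) {
    sm := []v1beta1.SecretMount{{
        SecretVolumeSource: v1.SecretVolumeSource{SecretName: "registry-config"},
        MountPath:          "/etc/rancher/k3s/registries.yaml",
        SubPath:            "registries.yaml",
        Role:               "all",
    }}
    return mounts.BuildSecretsMountsVolumes(sm, "server")
}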

View File

@@ -0,0 +1,523 @@
package mounts
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_BuildSecretMountsVolume(t *testing.T) {
type args struct {
secretMounts []v1beta1.SecretMount
role string
}
type expectedVolumes struct {
vols []v1.Volume
volMounts []v1.VolumeMount
}
tests := []struct {
name string
args args
expectedData expectedVolumes
}{
{
name: "empty secret mounts",
args: args{
secretMounts: []v1beta1.SecretMount{},
role: "server",
},
expectedData: expectedVolumes{
vols: nil,
volMounts: nil,
},
},
{
name: "nil secret mounts",
args: args{
secretMounts: nil,
role: "server",
},
expectedData: expectedVolumes{
vols: nil,
volMounts: nil,
},
},
{
name: "single secret mount with no role specified defaults to all",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
},
MountPath: "/mount-dir-1",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
},
},
},
{
name: "multiple secrets mounts with no role specified defaults to all",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
},
MountPath: "/mount-dir-1",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-2",
},
MountPath: "/mount-dir-2",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", nil),
expectedVolume("secret-2", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
},
},
},
{
name: "single secret mount with items",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
Items: []v1.KeyToPath{
{
Key: "key-1",
Path: "path-1",
},
},
},
MountPath: "/mount-dir-1",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", []v1.KeyToPath{{Key: "key-1", Path: "path-1"}}),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
},
},
},
{
name: "multiple secret mounts with items",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
Items: []v1.KeyToPath{
{
Key: "key-1",
Path: "path-1",
},
},
},
MountPath: "/mount-dir-1",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-2",
Items: []v1.KeyToPath{
{
Key: "key-2",
Path: "path-2",
},
},
},
MountPath: "/mount-dir-2",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", []v1.KeyToPath{{Key: "key-1", Path: "path-1"}}),
expectedVolume("secret-2", []v1.KeyToPath{{Key: "key-2", Path: "path-2"}}),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
},
},
},
{
name: "user will specify the order",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "z-secret",
},
MountPath: "/z",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "a-secret",
},
MountPath: "/a",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "m-secret",
},
MountPath: "/m",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("z-secret", nil),
expectedVolume("a-secret", nil),
expectedVolume("m-secret", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("z-secret", "/z", ""),
expectedVolumeMount("a-secret", "/a", ""),
expectedVolumeMount("m-secret", "/m", ""),
},
},
},
{
name: "skip entries with empty secret name",
args: args{
secretMounts: []v1beta1.SecretMount{
{
MountPath: "/mount-dir-1",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-2",
},
MountPath: "/mount-dir-2",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-2", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
},
},
},
{
name: "skip entries with empty mount path",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
},
MountPath: "",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-2",
},
MountPath: "/mount-dir-2",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-2", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
},
},
},
{
name: "secret mount with subPath",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
},
MountPath: "/etc/rancher/k3s/registries.yaml",
SubPath: "registries.yaml",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/etc/rancher/k3s/registries.yaml", "registries.yaml"),
},
},
},
// Role-based filtering tests
{
name: "role server includes only server and all roles",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "server-secret",
},
MountPath: "/server",
Role: "server",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "agent-secret",
},
MountPath: "/agent",
Role: "agent",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "all-secret",
},
MountPath: "/all",
Role: "all",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("server-secret", nil),
expectedVolume("all-secret", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("server-secret", "/server", ""),
expectedVolumeMount("all-secret", "/all", ""),
},
},
},
{
name: "role agent includes only agent and all roles",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "server-secret",
},
MountPath: "/server",
Role: "server",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "agent-secret",
},
MountPath: "/agent",
Role: "agent",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "all-secret",
},
MountPath: "/all",
Role: "all",
},
},
role: "agent",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("agent-secret", nil),
expectedVolume("all-secret", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("agent-secret", "/agent", ""),
expectedVolumeMount("all-secret", "/all", ""),
},
},
},
{
name: "empty role in secret mount defaults to all",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "no-role-secret",
},
MountPath: "/no-role",
Role: "",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "server-secret",
},
MountPath: "/server",
Role: "server",
},
},
role: "agent",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("no-role-secret", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("no-role-secret", "/no-role", ""),
},
},
},
{
name: "mixed roles with server filter",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "registry-config",
},
MountPath: "/etc/rancher/k3s/registries.yaml",
SubPath: "registries.yaml",
Role: "all",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "server-config",
},
MountPath: "/etc/server",
Role: "server",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "agent-config",
},
MountPath: "/etc/agent",
Role: "agent",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("registry-config", nil),
expectedVolume("server-config", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("registry-config", "/etc/rancher/k3s/registries.yaml", "registries.yaml"),
expectedVolumeMount("server-config", "/etc/server", ""),
},
},
},
{
name: "all secrets have role all",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-1",
},
MountPath: "/secret-1",
Role: "all",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "secret-2",
},
MountPath: "/secret-2",
Role: "all",
},
},
role: "server",
},
expectedData: expectedVolumes{
vols: []v1.Volume{
expectedVolume("secret-1", nil),
expectedVolume("secret-2", nil),
},
volMounts: []v1.VolumeMount{
expectedVolumeMount("secret-1", "/secret-1", ""),
expectedVolumeMount("secret-2", "/secret-2", ""),
},
},
},
{
name: "no secrets match agent role",
args: args{
secretMounts: []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "server-only",
},
MountPath: "/server-only",
Role: "server",
},
},
role: "agent",
},
expectedData: expectedVolumes{
vols: nil,
volMounts: nil,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
vols, volMounts := BuildSecretsMountsVolumes(tt.args.secretMounts, tt.args.role)
assert.Equal(t, tt.expectedData.vols, vols)
assert.Equal(t, tt.expectedData.volMounts, volMounts)
})
}
}
func expectedVolume(name string, items []v1.KeyToPath) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: items,
}},
},
},
},
}
}
func expectedVolumeMount(name, mountPath, subPath string) v1.VolumeMount {
return v1.VolumeMount{
Name: name,
MountPath: mountPath,
SubPath: subPath,
}
}

View File

@@ -81,7 +81,7 @@ func serverOptions(cluster *v1beta1.Cluster, token string) string {
}
if cluster.Spec.Mode != agent.VirtualNodeMode {
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage"
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage\n"
}
// TODO: Add extra args to the options

View File

@@ -8,7 +8,6 @@ import (
"strings"
"text/template"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
@@ -22,13 +21,13 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/mounts"
)
const (
k3kSystemNamespace = "k3k-system"
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
)
// Server
@@ -280,67 +279,31 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
volumeMounts []v1.VolumeMount
)
for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}
nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}
var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, err
}
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: s.cluster.Namespace,
},
Data: make(map[string][]byte, len(addons.Data)),
}
for k, v := range addons.Data {
clusterAddons.Data[k] = v
}
if err := s.client.Create(ctx, &clusterAddons); err != nil {
return nil, err
}
name := "varlibrancherk3smanifests" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)
volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
// changes to this part of the filesystem shouldn't be done manually. The secret should be updated instead.
ReadOnly: true,
}
volumeMounts = append(volumeMounts, volumeMount)
}
if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.loadCACertBundle(ctx)
if len(s.cluster.Spec.Addons) > 0 {
addonsVols, addonsMounts, err := s.buildAddonsVolumes(ctx)
if err != nil {
return nil, err
}
volumes = append(volumes, addonsVols...)
volumeMounts = append(volumeMounts, addonsMounts...)
}
if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.buildCABundleVolumes(ctx)
if err != nil {
return nil, err
}
volumes = append(volumes, vols...)
volumeMounts = append(volumeMounts, mounts...)
}
if len(s.cluster.Spec.SecretMounts) > 0 {
vols, mounts := mounts.BuildSecretsMountsVolumes(s.cluster.Spec.SecretMounts, "server")
volumes = append(volumes, vols...)
volumeMounts = append(volumeMounts, mounts...)
@@ -406,7 +369,7 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
StorageClassName: s.cluster.Spec.Persistence.StorageClassName,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
"storage": *s.cluster.Spec.Persistence.StorageRequestSize,
},
},
},
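StorageRequestSize is now a *resource.Quantity rather than a free-form string, so callers construct it explicitly; a minimal sketch (helper and values assumed):

package example

import (
    "k8s.io/apimachinery/pkg/api/resource"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// quantityPtr is a local helper, not part of this PR.
func quantityPtr(s string) *resource.Quantity {
    q := resource.MustParse(s)
    return &q
}

func examplePersistence() v1beta1.PersistenceConfig {
    return v1beta1.PersistenceConfig{
        Type:               v1beta1.DynamicPersistenceMode,
        StorageRequestSize: quantityPtr("2G"), // the default asserted in the controller tests
    }
}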
@@ -442,7 +405,7 @@ func (s *Server) setupStartCommand() (string, error) {
return output.String(), nil
}
func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
func (s *Server) buildCABundleVolumes(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
if s.cluster.Spec.CustomCAs == nil {
return nil, nil, fmt.Errorf("customCAs not found")
}
@@ -534,6 +497,71 @@ func (s *Server) mountCACert(volumeName, certName, secretName string, subPathMou
return volume, mounts
}
func (s *Server) buildAddonsVolumes(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
var (
volumes []v1.Volume
mounts []v1.VolumeMount
)
for _, addon := range s.cluster.Spec.Addons {
namespace := s.cluster.Namespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}
nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}
var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, nil, err
}
// skip copying the addon Secret when it already lives in the same namespace as the cluster
if namespace != s.cluster.Namespace {
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: s.cluster.Namespace,
},
Data: addons.Data,
}
if _, err := controllerutil.CreateOrUpdate(ctx, s.client, &clusterAddons, func() error {
return controllerutil.SetOwnerReference(s.cluster, &clusterAddons, s.client.Scheme())
}); err != nil {
return nil, nil, err
}
}
name := "addon-" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)
volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
ReadOnly: true,
}
mounts = append(mounts, volumeMount)
}
return volumes, mounts, nil
}
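A sketch of the spec shape buildAddonsVolumes consumes (names invented); when SecretNamespace points outside the cluster's namespace, the Secret is copied in with an owner reference as shown above:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// exampleAddonCluster declares two addons; the secret names are placeholders.
func exampleAddonCluster(ns string) *v1beta1.Cluster {
    return &v1beta1.Cluster{
        ObjectMeta: metav1.ObjectMeta{GenerateName: "cluster-", Namespace: ns},
        Spec: v1beta1.ClusterSpec{
            Addons: []v1beta1.Addon{
                {SecretRef: "test-addon"},                                  // same namespace as the cluster
                {SecretRef: "shared-addon", SecretNamespace: "k3k-system"}, // copied into the cluster namespace
            },
        },
    }
}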
func sortedKeys(keyMap map[string]string) []string {
keys := make([]string, 0, len(keyMap))

View File

@@ -54,6 +54,7 @@ func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcu
return ctrl.NewControllerManagedBy(mgr).
For(&apps.StatefulSet{}).
WithEventFilter(newClusterPredicate()).
Owns(&v1.Pod{}).
Named(statefulsetController).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
@@ -192,7 +193,9 @@ func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err

View File

@@ -2,13 +2,17 @@ package policy
import (
"context"
"errors"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -52,15 +56,36 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
}
for _, ns := range namespaces.Items {
selector := labels.NewSelector()
currentPolicyName := ns.Labels[PolicyNameLabelKey]
if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
selector = selector.Add(*req)
}
// This will match all the resources managed by the K3k Policy controller
// that have the app.kubernetes.io/managed-by=k3k-policy-controller label
selector := labels.SelectorFromSet(labels.Set{
ManagedByLabelKey: VirtualPolicyControllerName,
})
// if the namespace is bound to a policy -> cleanup resources of other policies
if ns.Labels[PolicyNameLabelKey] != "" {
requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})
// If the namespace is not bound to any policy, or if the policy it was bound to no longer exists,
// we need to clear policy-related fields on its Cluster objects.
if currentPolicyName == "" {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in unbound namespace", "namespace", ns.Name)
}
} else {
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, types.NamespacedName{Name: currentPolicyName}, &policy); err != nil {
if apierrors.IsNotFound(err) {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in namespace with non-existent policy", "namespace", ns.Name, "policy", currentPolicyName)
}
} else {
log.Error(err, "error getting policy for namespace", "namespace", ns.Name, "policy", currentPolicyName)
}
}
// if the namespace is bound to a policy -> cleanup resources of other policies
requirement, err := labels.NewRequirement(
PolicyNameLabelKey, selection.NotEquals, []string{currentPolicyName},
)
// log the error but continue cleaning up the other namespaces
if err != nil {
@@ -90,3 +115,30 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
return nil
}
// clearPolicyFieldsForClustersInNamespace sets the policy status on Cluster objects in the given namespace to nil.
func (c *VirtualClusterPolicyReconciler) clearPolicyFieldsForClustersInNamespace(ctx context.Context, namespace string) error {
log := ctrl.LoggerFrom(ctx)
var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
return fmt.Errorf("failed listing clusters in namespace %s: %w", namespace, err)
}
var updateErrs []error
for i := range clusters.Items {
cluster := clusters.Items[i]
if cluster.Status.Policy != nil {
log.V(1).Info("Clearing policy status for Cluster", "cluster", cluster.Name, "namespace", namespace)
cluster.Status.Policy = nil
if updateErr := c.Client.Status().Update(ctx, &cluster); updateErr != nil {
updateErr = fmt.Errorf("failed updating Status for Cluster %s: %w", cluster.Name, updateErr)
updateErrs = append(updateErrs, updateErr)
}
}
}
return errors.Join(updateErrs...)
}

View File

@@ -165,6 +165,7 @@ func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
if oldNode.Spec.PodCIDR != newNode.Spec.PodCIDR {
podCIDRChanged = true
}
if !reflect.DeepEqual(oldNode.Spec.PodCIDRs, newNode.Spec.PodCIDRs) {
podCIDRChanged = true
}
@@ -469,16 +470,22 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
var clusterUpdateErrs []error
for _, cluster := range clusters.Items {
orig := cluster.DeepCopy()
origStatus := cluster.Status.DeepCopy()
cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
cluster.Status.Policy = &v1beta1.AppliedPolicy{
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
Sync: policy.Spec.Sync,
}
if !reflect.DeepEqual(orig, cluster) {
if !reflect.DeepEqual(origStatus, &cluster.Status) {
log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)
// continue updating the other clusters even if an error occurred
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
clusterUpdateErrs = append(clusterUpdateErrs, err)
}
}
}

View File

@@ -15,6 +15,7 @@ import (
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -37,6 +38,7 @@ var (
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
ErrorIfCRDPathMissing: true,
@@ -48,7 +50,7 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme, Metrics: metricsserver.Options{BindAddress: "0"}})
Expect(err).NotTo(HaveOccurred())
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
@@ -59,6 +61,7 @@ var _ = BeforeSuite(func() {
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()
@@ -68,6 +71,7 @@ var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -2,7 +2,6 @@ package policy_test
import (
"context"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/resource"
@@ -78,6 +77,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
Namespace: namespace.Name,
}
return k8sClient.Get(ctx, key, networkPolicy)
}).
WithTimeout(time.Minute).
@@ -133,6 +133,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
Namespace: namespace.Name,
}
return k8sClient.Get(context.Background(), key, networkPolicy)
}).
WithTimeout(time.Minute).
@@ -155,6 +156,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
Namespace: namespace.Name,
}
return k8sClient.Get(ctx, key, networkPolicy)
}).
WithTimeout(time.Second * 10).
@@ -183,6 +185,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
WithTimeout(time.Second * 10).
@@ -208,6 +211,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
WithTimeout(time.Second * 10).
@@ -229,6 +233,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
WithTimeout(time.Second * 10).
@@ -250,7 +255,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
_, found := ns.Labels["pod-security.kubernetes.io/enforce"]
return found
}).
WithTimeout(time.Second * 10).
@@ -278,7 +285,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "privileged"
}).
WithTimeout(time.Second * 10).
@@ -296,7 +305,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
return enforceValue == "privileged"
}).
WithTimeout(time.Second * 10).
@@ -307,7 +318,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
})
It("should update Cluster's PriorityClass", func() {
It("updates the Cluster's policy status with the DefaultPriorityClass", func() {
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultPriorityClass: "foobar",
})
@@ -329,19 +340,22 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Spec.PriorityClass == policy.Spec.DefaultPriorityClass
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Spec.PriorityClass).To(BeEmpty())
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.PriorityClass).To(Not(BeNil()))
g.Expect(*cluster.Status.Policy.PriorityClass).To(Equal(policy.Spec.DefaultPriorityClass))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Should(Succeed())
})
It("should update Cluster's NodeSelector", func() {
It("updates the Cluster's policy status with the DefaultNodeSelector", func() {
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
@@ -366,18 +380,21 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Expect(err).To(Not(HaveOccurred()))
// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
g.Expect(cluster.Spec.NodeSelector).To(BeEmpty())
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Should(Succeed())
})
It("should update the nodeSelector if changed", func() {
It("updates the Cluster's policy status when the VCP nodeSelector changes", func() {
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
@@ -399,43 +416,57 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))
// Cluster Spec should not change; the VCP NodeSelector should be present in the Status
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// update the VirtualClusterPolicy
policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))
// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-2": "value-2"}))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Should(Succeed())
// Update the Cluster
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))
cluster.Spec.NodeSelector["label-3"] = "value-3"
err = k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))
// wait a bit and check the user's change is preserved
Eventually(func() bool {
var updatedCluster v1beta1.Cluster
Consistently(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, &updatedCluster)
Expect(err).To(Not(HaveOccurred()))
return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
err = k8sClient.Get(ctx, key, cluster)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-3": "value-3"}))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
Should(Succeed())
})
It("should create a ResourceQuota if Quota is enabled", func() {
@@ -451,6 +482,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
bindPolicyToNamespace(namespace, policy)
var resourceQuota v1.ResourceQuota
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
@@ -485,6 +517,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
Namespace: namespace.Name,
}
return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Minute).
@@ -494,6 +527,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// get policy again
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
Expect(err).To(Not(HaveOccurred()))
policy.Spec.Quota = nil
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
@@ -505,6 +539,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Namespace: namespace.Name,
}
err := k8sClient.Get(ctx, key, &resourceQuota)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Second * 10).
@@ -531,6 +566,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
Namespace: namespace.Name,
}
return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Minute).
@@ -548,6 +584,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Namespace: namespace.Name,
}
err := k8sClient.Get(ctx, key, &resourceQuota)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Second * 10).

View File

@@ -27,6 +27,7 @@ func newEncoder(format string) zapcore.Encoder {
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
var encoder zapcore.Encoder
if format == "text" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)

View File

@@ -16,8 +16,8 @@ echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
echo "Current TAG: ${VERSION} "
export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k-kubelet ./k3k-kubelet
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k-kubelet ./k3k-kubelet
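# Note: the ${build_args[@]+"${build_args[@]}"} form expands to nothing when
# the array is empty, avoiding "unbound variable" errors on bash versions
# where "${build_args[@]}" trips set -u for empty arrays.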
# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3kcli ./cli
go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3kcli ./cli

View File

@@ -3,7 +3,7 @@
set -eou pipefail
CONTROLLER_TOOLS_VERSION=v0.16.0
CONTROLLER_TOOLS_VERSION=v0.20.0
# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for structs that use floats
@@ -14,7 +14,6 @@ go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSIO
# add the 'helm.sh/resource-policy: keep' annotation to the CRDs
for f in ./charts/k3k/templates/crds/*.yaml; do
sed -i '0,/^[[:space:]]*annotations:/s/^[[:space:]]*annotations:/&\n helm.sh\/resource-policy: keep/' "$f"
echo "Validating $f"
yq . "$f" > /dev/null
echo "Annotating $f"
yq -c -i '.metadata.annotations["helm.sh/resource-policy"] = "keep"' "$f"
done

View File

@@ -18,6 +18,7 @@ SUBCOMMAND_FILES=(
"$DOCS_DIR/k3kcli_cluster_create.md"
"$DOCS_DIR/k3kcli_cluster_delete.md"
"$DOCS_DIR/k3kcli_cluster_list.md"
"$DOCS_DIR/k3kcli_cluster_update.md"
"$DOCS_DIR/k3kcli_kubeconfig.md"
"$DOCS_DIR/k3kcli_kubeconfig_generate.md"
"$DOCS_DIR/k3kcli_policy.md"

View File

@@ -11,6 +11,7 @@ import (
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
@@ -41,7 +42,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
It("can get the version", func() {
stdout, _, err := K3kcli("--version")
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("k3kcli version v"))
Expect(stdout).To(ContainSubstring("k3kcli version "))
})
When("trying the cluster commands", func() {
@@ -53,13 +54,14 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)
clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
DeleteNamespaces(namespace.Name)
})
_, stderr, err = K3kcli("cluster", "create", clusterName)
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
@@ -68,7 +70,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Expect(stderr).To(BeEmpty())
Expect(stdout).To(ContainSubstring(clusterNamespace))
_, stderr, err = K3kcli("cluster", "delete", clusterName)
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
@@ -76,6 +78,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Eventually(func() string {
stdout, stderr, err := K3kcli("cluster", "list", "-n", clusterNamespace)
Expect(err).To(Not(HaveOccurred()), string(stderr))
return stdout + stderr
}).
WithTimeout(time.Second * 5).
@@ -90,13 +93,14 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)
clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.6-k3s1", clusterName)
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.35.2-k3s1", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
})
@@ -138,10 +142,8 @@ var _ = When("using the k3kcli", Label("cli"), func() {
err error
)
namespaceName := "ns-" + rand.String(5)
_, _, err = Kubectl("create", "namespace", namespaceName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
namespace := NewNamespace()
namespaceName := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(namespaceName)
@@ -163,6 +165,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
})
var ns v1.Namespace
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(ns.Name).To(Equal(namespaceName))
@@ -201,6 +204,188 @@ var _ = When("using the k3kcli", Label("cli"), func() {
})
})
When("trying the cluster update commands", func() {
It("can update a cluster's server count", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
// Create the cluster first
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Update the cluster server count
_, stderr, err = K3kcli("cluster", "update", "-y", "--servers", "2", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Updating cluster"))
// Verify the cluster state was actually updated
var cluster v1beta1.Cluster
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Servers).To(Not(BeNil()))
Expect(*cluster.Spec.Servers).To(Equal(int32(2)))
})
It("can update a cluster's version", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
// Create the cluster with initial version
_, stderr, err = K3kcli("cluster", "create", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Update the cluster version
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", k3sVersion, "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Updating cluster"))
// Verify the cluster state was actually updated
var cluster v1beta1.Cluster
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
})
It("fails to downgrade cluster version", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
// Create the cluster with a version
_, stderr, err = K3kcli("cluster", "create", "--version", k3sVersion, "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Attempt to downgrade should fail
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("downgrading cluster version is not supported"))
// Verify the cluster version was NOT changed
var cluster v1beta1.Cluster
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
})
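// The downgrade rejection asserted above implies the CLI compares the requested
// version against the cluster's current version before applying an update. A
// minimal illustrative guard along those lines (an assumption, not the actual
// k3kcli implementation), using plain semantic-version comparison with
// golang.org/x/mod/semver:
//
//	func validateVersionChange(current, requested string) error {
//		// A suffix such as "-k3s1" is treated as a pre-release identifier by
//		// this package, so "v1.35.2-k3s1" compares lower than "v1.35.2".
//		if semver.Compare(requested, current) < 0 {
//			return errors.New("downgrading cluster version is not supported")
//		}
//		return nil
//	}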
It("fails to update a non-existent cluster", func() {
var (
stderr string
err error
)
// Attempt to update a cluster that doesn't exist
_, stderr, err = K3kcli("cluster", "update", "-y", "--servers", "2", "non-existent-cluster")
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("failed to fetch cluster"))
})
It("can update a cluster's labels", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
// Create the cluster first
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Update the cluster with labels
_, stderr, err = K3kcli("cluster", "update", "-y", "--labels", "env=test", "--labels", "team=dev", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Updating cluster"))
// Verify the cluster labels were actually updated
var cluster v1beta1.Cluster
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Labels).To(HaveKeyWithValue("env", "test"))
Expect(cluster.Labels).To(HaveKeyWithValue("team", "dev"))
})
It("can update a cluster's annotations", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
// Create the cluster first
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Update the cluster with annotations
_, stderr, err = K3kcli("cluster", "update", "-y", "--annotations", "description=test-cluster", "--annotations", "owner=qa-team", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Updating cluster"))
// Verify the cluster annotations were actually updated
var cluster v1beta1.Cluster
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Annotations).To(HaveKeyWithValue("description", "test-cluster"))
Expect(cluster.Annotations).To(HaveKeyWithValue("owner", "qa-team"))
})
})
When("trying the kubeconfig command", func() {
It("can generate a kubeconfig", func() {
var (
@@ -209,21 +394,22 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)
clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", clusterName)
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
_, stderr, err = K3kcli("kubeconfig", "generate", "--name", clusterName)
_, stderr, err = K3kcli("kubeconfig", "generate", "--namespace", clusterNamespace, "--name", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
_, stderr, err = K3kcli("cluster", "delete", clusterName)
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
})

View File

@@ -0,0 +1,235 @@
package k3k_test
import (
"context"
"os"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
const (
addonsTestsLabel = "addons"
addonsSecretName = "k3s-addons"
secretMountManifestMountPath = "/var/lib/rancher/k3s/server/manifests/nginx.yaml"
addonManifestMountPath = "/var/lib/rancher/k3s/server/manifests/k3s-addons/nginx.yaml"
)
var _ = When("a cluster with secretMounts configuration is used to load addons", Label("e2e"), Label(addonsTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
// Create the addon secret
err := createAddonSecret(ctx, namespace.Name)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.SecretMounts = []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: addonsSecretName,
},
MountPath: secretMountManifestMountPath,
SubPath: "nginx.yaml",
},
}
CreateCluster(cluster)
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: virtualClient,
}
})
It("will load the addon manifest in server pod", func() {
ctx := context.Background()
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, secretMountManifestMountPath)
Expect(err).To(Not(HaveOccurred()))
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
Expect(err).To(Not(HaveOccurred()))
Expect(addonContent).To(Equal(addonTestFile))
})
It("will deploy the addon pod in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
})
})
var _ = When("a cluster with addon configuration is used with addons secret in the same namespace", Label("e2e"), Label(addonsTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
// Create the addon secret
err := createAddonSecret(ctx, namespace.Name)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Addons = []v1beta1.Addon{
{
SecretNamespace: namespace.Name,
SecretRef: addonsSecretName,
},
}
CreateCluster(cluster)
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: virtualClient,
}
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, addonManifestMountPath)
Expect(err).To(Not(HaveOccurred()))
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
Expect(err).To(Not(HaveOccurred()))
Expect(addonContent).To(Equal(addonTestFile))
Eventually(func(g Gomega) {
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
})
})
var _ = When("a cluster with addon configuration is used with addons secret in the different namespace", Label("e2e"), Label(addonsTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
secretNamespace := NewNamespace()
// Create the addon secret
err := createAddonSecret(ctx, secretNamespace.Name)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() {
DeleteNamespaces(namespace.Name, secretNamespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Addons = []v1beta1.Addon{
{
SecretNamespace: secretNamespace.Name,
SecretRef: addonsSecretName,
},
}
CreateCluster(cluster)
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: virtualClient,
}
})
It("will load the addon manifest in server pod and deploys the pod", func() {
ctx := context.Background()
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, addonManifestMountPath)
Expect(err).To(Not(HaveOccurred()))
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
Expect(err).To(Not(HaveOccurred()))
Expect(addonContent).To(Equal(addonTestFile))
Eventually(func(g Gomega) {
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
})
})
func createAddonSecret(ctx context.Context, namespace string) error {
addonContent, err := os.ReadFile("testdata/addons/nginx.yaml")
if err != nil {
return err
}
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addonsSecretName,
Namespace: namespace,
},
Data: map[string][]byte{
"nginx.yaml": addonContent,
},
}
return k8sClient.Create(ctx, secret)
}
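Both variants exercise the same k3s behaviour: any manifest placed under /var/lib/rancher/k3s/server/manifests/ on a server node is picked up by k3s's auto-deploying manifests (AddOn) mechanism, so mounting nginx.yaml there, whether via spec.secretMounts or spec.addons, is enough for the nginx-addon pod to appear in the virtual cluster's default namespace.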

View File

@@ -5,8 +5,6 @@ import (
"os"
"strings"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
@@ -38,18 +36,24 @@ var _ = When("a cluster with custom certificates is installed with individual ce
}
for _, certName := range certList {
var cert, key []byte
var err error
var (
cert, key []byte
err error
)
filePathPrefix := ""
certfile := certName
if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}
if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))
@@ -98,12 +102,10 @@ var _ = When("a cluster with custom certificates is installed with individual ce
It("will load the custom certs in the server pod", func() {
ctx := context.Background()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
// check server-ca.crt
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"

View File

@@ -0,0 +1,44 @@
package k3k_test
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("creating a shared mode cluster", Label(e2eTestLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("creates nodes with the worker role", func() {
Eventually(func(g Gomega) {
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(GinkgoT().Context(), metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nodes.Items).To(HaveLen(1))
g.Expect(nodes.Items[0].Labels).To(HaveKeyWithValue("node-role.kubernetes.io/worker", "true"))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
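The label assertion works because kubectl and most tooling derive a node's displayed role from labels with the node-role.kubernetes.io/ prefix, so registering the virtual-kubelet node with node-role.kubernetes.io/worker=true is what makes it report as a worker instead of an agent.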

View File

@@ -59,12 +59,10 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
@@ -75,10 +73,10 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
return serverPods[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
@@ -90,7 +88,9 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
@@ -102,6 +102,7 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).

View File

@@ -89,6 +89,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
}
ctx := context.Background()
var err error
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
@@ -110,6 +111,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars).NotTo(BeEmpty())
var found bool
for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true
@@ -117,9 +119,11 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars[0].ValueFrom).NotTo(BeNil())
g.Expect(envVars[0].ValueFrom.FieldRef).NotTo(BeNil())
g.Expect(envVars[0].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
break
}
}
g.Expect(found).To(BeTrue())
containerStatuses := pod.Status.ContainerStatuses
@@ -148,15 +152,18 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars).NotTo(BeEmpty())
var found bool
for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true
g.Expect(envVar.ValueFrom).To(BeNil())
g.Expect(envVar.Value).To(Equal(virtualPod.Name))
break
}
}
g.Expect(found).To(BeTrue())
containerStatuses := pod.Status.ContainerStatuses

View File

@@ -0,0 +1,200 @@
package k3k_test
import (
"context"
"time"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created in a namespace with a policy", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
vcp *v1beta1.VirtualClusterPolicy
)
BeforeAll(func() {
ctx = context.Background()
// 1. Create StorageClasses in host
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-enabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
var err error
storageClassEnabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-disabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 2. Create VirtualClusterPolicy with StorageClass sync enabled
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vcp-sync-sc-",
},
Spec: v1beta1.VirtualClusterPolicySpec{
Sync: &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: true,
},
},
},
}
err = k8sClient.Create(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
// 3. Create Namespace with policy label
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ns-vcp-",
Labels: map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
},
},
}
// We use the k8s clientset for namespace creation to stay consistent with other tests
ns, err = k8s.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 4. Create VirtualCluster in that namespace
// The cluster doesn't have storage class sync enabled in its spec
clusterObj := NewCluster(ns.Name)
clusterObj.Spec.Sync = &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: false,
},
}
clusterObj.Spec.Expose.NodePort.ServerPort = ptr.To[int32](30000)
CreateCluster(clusterObj)
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(clusterObj)
virtualCluster = &VirtualCluster{
Cluster: clusterObj,
RestConfig: restConfig,
Client: client,
Kubeconfig: kubeconfig,
}
DeferCleanup(func() {
DeleteNamespaces(ns.Name)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8sClient.Delete(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
})
})
It("has the storage classes sync enabled from the policy", func() {
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Status.Policy).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
})
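// Note that the Cluster was created above with spec.sync.storageClasses.enabled
// set to false; the policy bound to the namespace is surfaced through
// Cluster.Status.Policy, and its sync settings take precedence over the
// cluster's own spec, which is what this assertion verifies.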
It("will sync host storage classes with the sync enabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
// We only care about the storage classes we created for this test to avoid noise
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
When("disabling the storage class sync in the policy", Ordered, func() {
BeforeAll(func() {
original := vcp.DeepCopy()
vcp.Spec.Sync.StorageClasses.Enabled = false
err := k8sClient.Patch(ctx, vcp, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
})
It("will remove the synced storage classes from the virtual cluster", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
})
})

View File

@@ -0,0 +1,163 @@
package k3k_test
import (
"context"
"os"
"time"
"k8s.io/kubernetes/pkg/api/v1/pod"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a cluster with private registry configuration is used", Label("e2e"), Label(registryTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
vcp := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.VirtualClusterMode,
DisableNetworkPolicy: true,
},
}
Expect(k8sClient.Create(ctx, vcp)).To(Succeed())
namespace := NewNamespace()
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(namespace), namespace)
Expect(err).To(Not(HaveOccurred()))
namespace.Labels = map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
}
Expect(k8sClient.Update(ctx, namespace)).To(Succeed())
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
Expect(k8sClient.Delete(ctx, vcp)).To(Succeed())
})
err = privateRegistry(ctx, namespace.Name)
Expect(err).ToNot(HaveOccurred())
cluster := NewCluster(namespace.Name)
// configure the cluster with the private registry secrets using SecretMounts
// Using subPath allows mounting individual files while keeping parent directories writable
cluster.Spec.SecretMounts = []v1beta1.SecretMount{
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "k3s-registry-config",
},
MountPath: "/etc/rancher/k3s/registries.yaml",
SubPath: "registries.yaml",
},
{
SecretVolumeSource: v1.SecretVolumeSource{
SecretName: "private-registry-ca-cert",
},
MountPath: "/etc/rancher/k3s/tls/ca.crt",
SubPath: "tls.crt",
},
}
cluster.Spec.Mode = v1beta1.VirtualClusterMode
// airgap the k3k-server pod
err = buildRegistryNetPolicy(ctx, cluster.Namespace)
Expect(err).ToNot(HaveOccurred())
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("will be load the registries.yaml and crts in server pod", func() {
ctx := context.Background()
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
// check registries.yaml
registriesConfigPath := "/etc/rancher/k3s/registries.yaml"
registriesConfig, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, registriesConfigPath)
Expect(err).To(Not(HaveOccurred()))
registriesConfigTestFile, err := os.ReadFile("testdata/registry/registries.yaml")
Expect(err).To(Not(HaveOccurred()))
Expect(registriesConfig).To(Equal(registriesConfigTestFile))
// check ca.crt
CACrtPath := "/etc/rancher/k3s/tls/ca.crt"
CACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, CACrtPath)
Expect(err).To(Not(HaveOccurred()))
CACrtTestFile, err := os.ReadFile("testdata/registry/certs/ca.crt")
Expect(err).To(Not(HaveOccurred()))
Expect(CACrt).To(Equal(CACrtTestFile))
})
It("will only pull images from mirrored docker.io registry", func() {
ctx := context.Background()
// make sure that any pod using the docker.io mirror works
virtualCluster.NewNginxPod("")
// creating a pod with an image that uses any registry other than docker.io should fail,
// for example public.ecr.aws/docker/library/alpine:latest
alpinePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "alpine-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "alpine",
Image: "public.ecr.aws/docker/library/alpine:latest",
}},
},
}
By("Creating Alpine Pod and making sure its failing to start")
var err error
alpinePod, err = virtualCluster.Client.CoreV1().Pods(alpinePod.Namespace).Create(ctx, alpinePod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the alpine Pod is failing to pull the image
Eventually(func(g Gomega) {
alpinePod, err = virtualCluster.Client.CoreV1().Pods(alpinePod.Namespace).Get(ctx, alpinePod.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
status, _ := pod.GetContainerStatus(alpinePod.Status.ContainerStatuses, "alpine")
state := status.State.Waiting
g.Expect(state).NotTo(BeNil())
g.Expect(state.Reason).To(BeEquivalentTo("ImagePullBackOff"))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
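The behaviour exercised here comes from k3s honouring /etc/rancher/k3s/registries.yaml for containerd registry configuration: the mounted testdata config mirrors docker.io (presumably through the private registry whose CA is mounted at /etc/rancher/k3s/tls/ca.crt), and buildRegistryNetPolicy cuts off direct egress from the server pod, so the nginx pull through the mirror succeeds while the public.ecr.aws image ends up in ImagePullBackOff.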

View File

@@ -8,6 +8,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
schedv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -99,6 +100,100 @@ var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusT
WithPolling(time.Second * 5).
Should(Succeed())
})
It("created with field controlled from a policy", func() {
ctx := context.Background()
priorityClass := &schedv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
},
Value: 100,
}
Expect(k8sClient.Create(ctx, priorityClass)).To(Succeed())
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "status-cluster-",
Namespace: namespace.Name,
},
Spec: v1beta1.ClusterSpec{
PriorityClass: priorityClass.Name,
},
}
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
DeferCleanup(func() {
Expect(k8sClient.Delete(ctx, priorityClass)).To(Succeed())
})
clusterKey := client.ObjectKeyFromObject(clusterObj)
// Check for the initial status to be set
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Second * 20).
Should(Succeed())
// Check for the status to be updated to Ready
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
g.Expect(clusterObj.Status.Policy.Name).To(Equal(vcp.Name))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
// update policy
priorityClassVCP := &schedv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
},
Value: 100,
}
Expect(k8sClient.Create(ctx, priorityClassVCP)).To(Succeed())
DeferCleanup(func() {
Expect(k8sClient.Delete(ctx, priorityClassVCP)).To(Succeed())
})
vcp.Spec.DefaultPriorityClass = priorityClassVCP.Name
Expect(k8sClient.Update(ctx, vcp)).To(Succeed())
// Check for the status to be updated to Ready
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
g.Expect(clusterObj.Status.Policy.PriorityClass).To(Not(BeNil()))
g.Expect(*clusterObj.Status.Policy.PriorityClass).To(Equal(priorityClassVCP.Name))
g.Expect(clusterObj.Spec.PriorityClass).To(Equal(priorityClass.Name))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
})
})
Context("and the cluster has validation errors", func() {

View File

@@ -0,0 +1,194 @@
package k3k_test
import (
"context"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/controller/cluster"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
)
BeforeAll(func() {
ctx = context.Background()
virtualCluster = NewVirtualCluster()
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
storageClassEnabled, err := k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
})
})
It("has disabled the storage classes sync", func() {
Expect(virtualCluster.Cluster.Spec.Sync).To(Not(BeNil()))
Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeFalse())
})
It("doesn't have storage classes", func() {
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(0))
})
It("has some storage classes in the host", func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(hostStorageClasses.Items).To(Not(HaveLen(0)))
})
It("can create storage classes in the virtual cluster", func() {
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
},
Provisioner: "my-provisioner",
}
storageClass, err := virtualCluster.Client.StorageV1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(1))
Expect(virtualStorageClasses.Items[0].Name).To(Equal(storageClass.Name))
})
When("enabling the storage class sync", Ordered, func() {
BeforeAll(func() {
GinkgoWriter.Println("Enabling the storage class sync")
original := virtualCluster.Cluster.DeepCopy()
virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled = true
err := k8sClient.Patch(ctx, virtualCluster.Cluster, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
})
It("will sync host storage classes with the sync enabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
When("editing a synced storage class in the host cluster", Ordered, func() {
var syncedStorageClass *storagev1.StorageClass
BeforeAll(func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
syncedStorageClass = &hostSC
break
}
}
Expect(syncedStorageClass).To(Not(BeNil()))
syncedStorageClass.Labels["foo"] = "bar"
_, err = k8s.StorageV1().StorageClasses().Update(ctx, syncedStorageClass, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("will update the synced storage class in the virtual cluster", func() {
Eventually(func(g Gomega) {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, syncedStorageClass.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(syncedStorageClass.Labels).Should(HaveKeyWithValue("foo", "bar"))
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
})
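The MustPassRepeatedly(5) modifier used in these assertions makes each Eventually require five consecutive successful polls, which guards against declaring the sync (or its removal) converged on a single lucky poll while the controller is still reconciling.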

View File

@@ -79,6 +79,7 @@ var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), f
}
var err error
virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

View File

@@ -20,7 +20,9 @@ import (
var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
@@ -174,7 +176,9 @@ var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label
var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
@@ -238,7 +242,9 @@ var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel)
var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
@@ -389,7 +395,9 @@ var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Labe
var _ = When("a virtual mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
@@ -459,6 +467,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -469,8 +478,8 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
// Add initial old version
cluster.Spec.Version = k3sOldVersion
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
@@ -501,13 +510,14 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
cluster.Spec.Version = k3sVersion
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
@@ -530,6 +540,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -545,6 +556,7 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -555,8 +567,8 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
// Add initial old version
cluster.Spec.Version = k3sOldVersion
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To[int32](1)
@@ -589,15 +601,17 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
cluster.Spec.Version = k3sVersion
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
@@ -646,6 +660,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -688,6 +703,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -716,6 +732,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -731,6 +748,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -777,6 +795,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
})
It("will scale down server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -803,6 +822,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -818,6 +838,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -860,6 +881,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -888,6 +910,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -903,6 +926,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -952,6 +976,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
By("Scaling down cluster")
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)

View File

@@ -233,6 +233,7 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)
return err
}).
WithTimeout(time.Minute * 2).
@@ -266,6 +267,7 @@ func NewVirtualK8sClientAndKubeconfig(cluster *v1beta1.Cluster) (*kubernetes.Cli
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)
return err
}).
WithTimeout(time.Minute * 2).
@@ -397,26 +399,25 @@ func (c *VirtualCluster) ExecCmd(pod *v1.Pod, command string) (string, string, e
func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
GinkgoHelper()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
return serverPods[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
}
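// listServerPods is used throughout these tests but is not defined in this
// excerpt; based on the List call it replaces above, a plausible shape (an
// illustration, not necessarily the real helper) is:
//
//	func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
//		GinkgoHelper()
//		selector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
//		pods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
//		Expect(err).To(Not(HaveOccurred()))
//		return pods.Items
//	}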

tests/testdata/addons/nginx.yaml vendored Normal file (9 lines added)
View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-addon
namespace: default
spec:
containers:
- name: nginx
image: nginx:latest

tests/testdata/registry/certs/ca.crt vendored Normal file (33 lines added)
View File

@@ -0,0 +1,33 @@
-----BEGIN CERTIFICATE-----
MIIFuzCCA6OgAwIBAgIUI2MIXZDFl1X+tVkpbeYv78eGB5swDQYJKoZIhvcNAQEL
BQAwbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUxvY2FsMQ4wDAYDVQQHDAVMb2Nh
bDESMBAGA1UECgwJUHJpdmF0ZUNBMQwwCgYDVQQLDANEZXYxHDAaBgNVBAMME1By
aXZhdGUtUmVnaXN0cnktQ0EwHhcNMjUxMTI2MTIxMDIyWhcNMzUxMTI0MTIxMDIy
WjBtMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFTG9jYWwxDjAMBgNVBAcMBUxvY2Fs
MRIwEAYDVQQKDAlQcml2YXRlQ0ExDDAKBgNVBAsMA0RldjEcMBoGA1UEAwwTUHJp
dmF0ZS1SZWdpc3RyeS1DQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AIwwuetZ0bpDmYkdP1HjQpPVjatiQHyedyulKBfPo9/uVE+HVoPe3Ku4QQgSmhKs
hzYE0g7CoM/Kr1EdCYS0RkJhLp+Mfdqwcn2+QXH0PHqdJU3ocutmxRB0xObXyXBv
2Nf+0yKNY36E096wgcJKIl8eNrONmFbPmNl7PjsIvnilQBUSUbfecxUjDcmbZxXq
dSSntMPN/twPi9FzZGMGhZzHD0+Wf+9V0dfF1L1P7rFtmdEtPbBEH6wZFpKl8qzH
O3aXxznLbRSiVZ8PP4zdEKXGcXU8bWZ0NeGPOfgQhsBZvN2auV+esb1McK+7ZzOK
P5dPuzmlCCa8F7+P8wYgHm3W5dCkwsRqvu+ht4Z2sw/Gj1Uot7hunW2jO1PWoegs
W4kGD2EFHqmYU8j3RzxppY0YvZxlSVMiQDHE11ZKHrvu0p7EGsZnAqntAH73jlEg
kt7ZhAS+AMs+evIGAFL5C6dZc8C99uiXiANausrBlVQUk068kR7W2mVy8omet2mQ
DCkmnLJnrxgq5BwPKKvqMDs6dl+idYirxiX33dhildTrkX07DIOwy0nBTdBz8O5s
PIhIA5maVZt8g8w+YpNLEJSMI7NJ1YlgIJ3P9JBOk47hlNp3gw7WlkUxW5XXS1jU
qUWUDHi8grjL9WxSbENXIY4aGIu5ag/xqgQzKGkWcr1nAgMBAAGjUzBRMB0GA1Ud
DgQWBBQYh4H6KFRZXYi9/pzeZUTgxlWQ3jAfBgNVHSMEGDAWgBQYh4H6KFRZXYi9
/pzeZUTgxlWQ3jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBt
IGJhMJsvDFXWO5pRMjA633k4zquyYBfSt1lTmH5S91SWW8sAickNVvnpBvmsG5lg
GAA1kdF9b7+yrcWGK3TldQobZVpU46MBoiqWPY8tHqbM/0x+UwHHPTwbNa5JLjxT
5ECsZa5wdeACiLt0dFv4CJ8GIcGK7k8QaoxvolEPcxbEpas7bJF9cHNZH3TEwhIb
kC6Q+4+Y2r0pxDW/2uqFpL2RSzk4kJDH3K2y3ywnkSsM6QTDXlaBKijU5bXvrq7v
ZIAX75w+dSHtj4oCWFm/jgVyS+KrAhWbjwxwRMLku0/603OQCAv7c1oEbLh58fLb
zfOOPGkpDvNP5rTwacIyW0P0/GJpmwFjaJYRJgmmCwM7S0qhhJfuVp2d7oZvkkRT
vRpNqpR813ge6T3pnXpdBA9NofTiIsxJ18CGDaHBvDBR+MAsPjmCskIsf9T3fGY/
fAzuYd8qgE2jIuyZzBLIwARq+zKSzBwgLbfnYRprbp62Qi0OQRYOqxgfP0grw29S
k++Wtfd1zr8OCX/8CkC1P85ipzUoF3G7W1Cadn4aesfxvHPKQlRlpHnt7RImwJMZ
k5QP/2igzjjGwvlGb97Br25dgjiJmQlY3IjqNpzt8uIrCK65vQSwpioq7coJWlTZ
feU/+JrEL8lsa4jkMMeYnW4IJTRqq4Aqoe+e+vz8bQ==
-----END CERTIFICATE-----

tests/testdata/registry/certs/ca.key vendored Normal file (52 lines added)
View File

@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCMMLnrWdG6Q5mJ
HT9R40KT1Y2rYkB8nncrpSgXz6Pf7lRPh1aD3tyruEEIEpoSrIc2BNIOwqDPyq9R
HQmEtEZCYS6fjH3asHJ9vkFx9Dx6nSVN6HLrZsUQdMTm18lwb9jX/tMijWN+hNPe
sIHCSiJfHjazjZhWz5jZez47CL54pUAVElG33nMVIw3Jm2cV6nUkp7TDzf7cD4vR
c2RjBoWcxw9Pln/vVdHXxdS9T+6xbZnRLT2wRB+sGRaSpfKsxzt2l8c5y20UolWf
Dz+M3RClxnF1PG1mdDXhjzn4EIbAWbzdmrlfnrG9THCvu2czij+XT7s5pQgmvBe/
j/MGIB5t1uXQpMLEar7vobeGdrMPxo9VKLe4bp1toztT1qHoLFuJBg9hBR6pmFPI
90c8aaWNGL2cZUlTIkAxxNdWSh677tKexBrGZwKp7QB+945RIJLe2YQEvgDLPnry
BgBS+QunWXPAvfbol4gDWrrKwZVUFJNOvJEe1tplcvKJnrdpkAwpJpyyZ68YKuQc
Dyir6jA7OnZfonWIq8Yl993YYpXU65F9OwyDsMtJwU3Qc/DubDyISAOZmlWbfIPM
PmKTSxCUjCOzSdWJYCCdz/SQTpOO4ZTad4MO1pZFMVuV10tY1KlFlAx4vIK4y/Vs
UmxDVyGOGhiLuWoP8aoEMyhpFnK9ZwIDAQABAoICACjieQZLTp/84QUc83+FQMBu
kn9+CwKNEII5C2VOWCORlSMQfEm/MCogdU7OZgK2MESvyTcmydFv8gs85a6/CJKJ
VxiO15F0zh8f4mRCb3Tu6Zc8CG/gq+4tr9MG8aeJ5vqvRZIZHAAk6slSPrWT+0w0
Oo3I6LnAl3otuCttVGdJAlRi4FQ4WuW6MGYwnTLGCt3izxQfuokhO4ydE5TRrRvY
7f0vDiaVp7o+5tlDO4ChTy+y+v+yDm6ZbnzcStbaz9u5Tg/r5OcUpNXbk5QYUKeY
JTSkp98uWxxqMeTHpRTp1uvmGNPrKzji1yZZCDL+yabuSNL571OknWRvrdeGfHjr
lK68EnznaXO5aI7/85fVl+eeU+l3lb3iVkKTmGoS9/wbJwaCy92++P9H0hfO96Zj
2ssIRNds1+3/fLrOR9ctkIHQBze/lp3OfPqglVcbx9CH9jfKp0hgC4d6osSd6cqW
bZAyz5AZb/3tiCbEhsRXgXfJ4YhaNCn7Pug9tR6YUAz6KlsfqrwpJODBYnISIO/E
kKze14lwMtYB5F1RV9plb1GeI3uVA0IY9RcENyLF4bDl5GzeiBMPhEQ3ZTCgz2Uy
AtJphdZekqXJivOhS4D8eRFBVSXrOoC3V40QYBM4DYmvlPOxRG2vxpwOrrtlOUY7
ix6tJXUb16YQWCNkMKPZAoIBAQDEsqHsGN1bJDzjci/sRKbrXvdBVvR80qt1yrfF
5CkPkbTZTnTZ8OSPGhy4icoJ0IxoXjhQidMYI/JyDES49sX373ITD9eTLD2KJGL9
kLhoUbeKsC9oFq0hmsprmmjWZ8N6rn9ONE7HEt5PM9HS7sCS8+mzBpaLJoNWgg//
00y1brjj00/g8tbfkTxEfEaeG4Artmeeq7HXm51Na1sft6XHUYTiEJ5DO+/0S45i
orf2GhHxwawoVj/WFZIpjKOA0CmncKJ3VhiVekkGMUvnP5fJaASy//AsssQ86h58
ELUiRO/tecwLI3X4FYklsOO+LqNrh4BbOlkI9fdVbwIIEvHDAoIBAQC2dMg/N5JO
DkzbVQRZIiIsZoCr7Z6mHOywZ5wpPS6E0anOxBaJTpPormX7gqpVcg5lzj3M0uYX
wQywO81dUAQkrCIri0cHnS93/5oWTlcBzVg51DmeTuhDsVlVOmFwroStCVeqZwOB
rDdOteoYkRt6lHYR3jA4WtLqsghFrpl6xagqJozB4qGZUQ3PXz1xOPaByQRwAbcd
ghZ8QngKnFe1T50WhX8r3UoFD3AqbkBcApLzwOf0Twf4QBMb+49oGZMmfZ4tnTCu
q1zPANnB0PRZVhsMr+tfc9M5GPCPVHfzQzAXnCdkgI8oTniANNDl6laj4wa64ne9
H1ZwiI7fR8eNAoIBABQKMw8P1XWUspNlrdY/hFYUndJNXqlc+VUN6z1BKqHIcYl2
Qdd2gILH4Uc32pq3Yaa8erZR5GzgNLJD57iEg9Tn01J32bnH1xk87czxsqgGM1Hw
81OCg+8Ziyf9WlMFzVexcYzxLVmA5Z9iIy1/X6VZLmUr9aiFqvnkVGb3Cyis+C9V
9xxvAU9Tx7UeiD9Rg/RwKAx1Z7AUzaj2mBkaJ8yv1H8HvGgTMjZMgFwyQdXUACIG
XljZuLVCC1sqVfoouyWxBwxrfCO2irwTx6zuwLMnYtst0jVrnSyrmaGAPkQYi+1A
7HXyDfHRl+B8LifRLpsk+gHRZwLPtHxCzA0wiOsCggEBAJvVJmqH5hdws0fpVuth
8doGOgOd0ZCCx8zq0T+Pl7ms8OE+LRlc2Ysz2Lp1oVGVNqLRAYt83TSQl2u1x/LY
spE3y39xV1szbyWIU2yVwE4zuhS6I/QH5Oxb/raCRFLfW0YG4q8RiLcqBZreWHBf
Dx8kyar9ICYhvF7ja5lIRKHNS5GklzfJfsfZqHfjGjEnu7Kho36emG1FfDro8mnt
miOrObnQjwtB10R3KQ+0Vpe/Qw+ZRQMutNncr/WIZ7U7kqifRYgj5z5n8b6DNXkK
JIhguH2fiuJdpJvxpxRjyockbWDc5/A4tQxx6Q1nDrwv54vWDRt07VvD9inrGEuv
nMkCggEBAK3Qud7mRdXT1GyVT2htguwH0EY+4czXy1mWZvIfZmd9rXzcKrYt/R08
q6Ym4liEzAyZmVJACGF5f9GyKriBinjXoCsIU9qLSsggCZhW48Ma2HXNx53g0u1U
xvIMcWgR/aMfjV2uu0keKCucP+ZCheHYq0Tn67KnhdtyhZiv9rPMVJ2IamDKC8ov
Tr/7ibSsW8xx84e/4rl310w/9CmuDtAP76aOWQ3zg5P0fq68mAKNPOhwlmHGNg7P
udczhHjEsAOiLth5A50ewf4SbnEjSTR8NYMKeIqxcgc/a74E20B84mB8+255LdsZ
OyNDZebtWToMbZu8LqzYns+6z16TmKA=
-----END PRIVATE KEY-----

35
tests/testdata/registry/certs/registry.crt vendored Normal file

@@ -0,0 +1,35 @@
-----BEGIN CERTIFICATE-----
MIIGFDCCA/ygAwIBAgIUOuZ9BopS9seAhiWlSMwUSuT5KWswDQYJKoZIhvcNAQEL
BQAwbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUxvY2FsMQ4wDAYDVQQHDAVMb2Nh
bDESMBAGA1UECgwJUHJpdmF0ZUNBMQwwCgYDVQQLDANEZXYxHDAaBgNVBAMME1By
aXZhdGUtUmVnaXN0cnktQ0EwHhcNMjUxMTI3MTE0ODI2WhcNMzUxMTI1MTE0ODI2
WjBwMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFTG9jYWwxDjAMBgNVBAcMBUxvY2Fs
MRgwFgYDVQQKDA9Qcml2YXRlUmVnaXN0cnkxDDAKBgNVBAsMA0RldjEZMBcGA1UE
AwwQcHJpdmF0ZS1yZWdpc3RyeTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
ggIBAKKJWOvCWPuv3uRjKuRdMFigshFtRz1iFeezrs0GIefHTDX4oSV29QpeQD4Y
eURghpZhVAhE6kaLOCGIiJId5u5l4SGGHdjAt4653Px1a2TjXU2n42GK/b9Q4Mzo
KpcyyK7Rzjh5gKT1moEOgSa1hAXnnzfK6qtThFIamMg9b9xFCSEIU3kZ1us3BN1t
JjjkML27aCnEfXYlZPDVSyvV6dGG2xnI4MDiP1P5FkY10p/ryPMTbqCQpJaamwGX
xmmtieEEkXahO/xAug2cgTMz9TFmZHagvmZH7h/625MAAKMlPJvLoYM1E0q219nS
7TZ85H80ymV7G/otISnsThwX6G2gyS/s5ZbJAuKzNBAjfBl2fXOSIU+YVycbUAr4
2Wzn9rhGIh4972LYTuISAG3BjymFGsUAOxXaFcZwlGOtc1ORqLI4d5XK5yBBW+a7
+1bf9RfWq3KCDmhXgMDCaolHSMnVH3pyzkSuNfZB20Ic4cae7+TnHCUMvKBZ+H5o
N/V0Uo4WNC+tHxtTmGfjdKmvWgAzc8CaD+xtDHcJgRo45wQDBd0TaBR8XPSF9iDy
vnYDRv2cKHpfICDfvDUgN4mvHkABBj0ELQopSMq60nc8WvG6ftd2twoEMOuSekOx
PvDWQCuzhG/Rx2eAckdMIZgPAYiSjA/pCDI8SQXx3EVhW/3vAgMBAAGjgagwgaUw
HwYDVR0jBBgwFoAUGIeB+ihUWV2Ivf6c3mVE4MZVkN4wCQYDVR0TBAIwADALBgNV
HQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwNgYDVR0RBC8wLYIQcHJpdmF0
ZS1yZWdpc3RyeYIIcmVnaXN0cnmCCWxvY2FsaG9zdIcEfwAAATAdBgNVHQ4EFgQU
NL/k9cTjQNh6X5LfvYKTyxhccFAwDQYJKoZIhvcNAQELBQADggIBAGD8ZKPjCwhT
2UtNOVf4AXp9GRwEet1r1Q+jO/5suOMm3JsL+ohB00pZaWXdHfMpHvP/SMSXCdl5
fZTqXenTGTrxlItllLinJKjYtTYLH2LUyel8dOIhH+cM8KW/rMjwqnP/urWR+GA9
5t4wq6QZxz3cCoQbPSCRXBcuzR/b2/ebEILkyHGoixpUHsU9GjVaKHU+uqWwlUa5
RQYEgM7sCFhcdaxk2YXlBmqajOvUN/TW5Wxv0IkHZlyqg15DLUeZT+toCSrDJRh9
7zk6S3spTJL+R0P3NVDpvUeMXvo3NSJGlpUXDSqflIxs9V0FSD/Jj+koorW0TaDT
m2G0UrWE8FVuppOI4RkwN/iMBu5m5qM6OJhknZnzJEfgnX9UX+e2g89+JjT9OoVu
8s2kh0bdk+motkV7Vyfjh/jtpdesW+qkIO0RFZ2yKNIA13Tl0zu9okx0TI+Wt8FD
XRzO74dOpWwHjwCOQDaBa8Fy1cxxs+WtEMqjC36SPQzfiW9JaGMFRI9PZei7VNiY
crwOTu8wYjalXXUC8RWtXXUZQjcCp5tNPuCyJBTym2oh+uLu2mimgqYJ4KNEwNKB
21bAipI5hyY+b6AtZ/qHPxv+czL8sPhL4W/tZPCe++WWkQgC8sRa4aVrKnc14cIT
986BhJJ9j0EhPPMBOIbVo0cc75HQSQHE
-----END CERTIFICATE-----

52
tests/testdata/registry/certs/registry.key vendored Normal file

@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCiiVjrwlj7r97k
YyrkXTBYoLIRbUc9YhXns67NBiHnx0w1+KEldvUKXkA+GHlEYIaWYVQIROpGizgh
iIiSHebuZeEhhh3YwLeOudz8dWtk411Np+Nhiv2/UODM6CqXMsiu0c44eYCk9ZqB
DoEmtYQF5583yuqrU4RSGpjIPW/cRQkhCFN5GdbrNwTdbSY45DC9u2gpxH12JWTw
1Usr1enRhtsZyODA4j9T+RZGNdKf68jzE26gkKSWmpsBl8ZprYnhBJF2oTv8QLoN
nIEzM/UxZmR2oL5mR+4f+tuTAACjJTyby6GDNRNKttfZ0u02fOR/NMplexv6LSEp
7E4cF+htoMkv7OWWyQLiszQQI3wZdn1zkiFPmFcnG1AK+Nls5/a4RiIePe9i2E7i
EgBtwY8phRrFADsV2hXGcJRjrXNTkaiyOHeVyucgQVvmu/tW3/UX1qtygg5oV4DA
wmqJR0jJ1R96cs5ErjX2QdtCHOHGnu/k5xwlDLygWfh+aDf1dFKOFjQvrR8bU5hn
43Spr1oAM3PAmg/sbQx3CYEaOOcEAwXdE2gUfFz0hfYg8r52A0b9nCh6XyAg37w1
IDeJrx5AAQY9BC0KKUjKutJ3PFrxun7XdrcKBDDrknpDsT7w1kArs4Rv0cdngHJH
TCGYDwGIkowP6QgyPEkF8dxFYVv97wIDAQABAoICAEH/66+wN1ncTHIJIr2gaaVT
e3tAGJGAZsyzVePC/bmUYAn6b9U6vL39D7EnVvbBC2W9F9ZTxZ3nol9bhblvkvpz
PDvUrgH6H49BQc7yDy3kdVq3NcnCGs+5E8+g5sqGwJ7caxTbobVaVebZ8O+6/WU4
bJrHNwti2nRMgIWvDOEw10gmjV67c14H9V3EmKS5ZGFm3CE5vIhhHt/8fI3MSynd
zNJnk3w/Yt/CYZ0Y9fIiWHL8DQv+MBdHqHG5I8R9x2Mr67V0O1tvHR2x03TrQEFT
BrB1DVuTEcrCnq7ObXPSBw5sXaVdw/uuy2+UCub5R/+vfBBBMVchRDo1znHx81sL
ByxnSm3FSnXYuI+v/k1BzTZeZCtPodo57s7w1bL9pkz6HZgob3ZNV5C1gqWz+dyv
gznB8xkZkhUu4783nBY4zgUZAm+eptjhSGX5gqDZ+lwLBHVkm9Ir1Yu4Jo153IVq
tJkTpPsq6E0kMKw3k75b39g4hu7iO2pzZ5bu8iqEcjnkUG47fHyRzPf1VopteW3Q
u3qMgnyO7G6IRYbcy0xKeumN4qtDh0w0PANOCkvqYpYk4kD1u3a4HYygJ+423VK/
SFJvNnMJuAsWruElaFF5bkmSqVIkfbDBZTmw/BtHfA7hf5J1o59LmkOJX8XvKouF
yK11dWuXh+wDgAg2AakBAoIBAQDUYOMFn6jUk2UC8RYUCxhVQmM32BM9xySkjsWb
zNMe9Onmc9jJu6eeCORl/IAZIr1Vc+vND0zs52FOVg1zfTX4X5SFpMall+fLcfIl
MDTJag7bcZX9jJnOaDSMK4jn44wrZDZBjf5F/sLcmcFT+qECnLEI6nqLcU2vBeAH
/KtJzxDLmmFsdgH8oSsiU6dIHbHsBPNtRgP3Xgkv7iuFBBJNwQfQWjysvz+jyC48
Q8oVtJkICLeMn8A2I0a9LaROXLAQi1HV5+PtvbZnC8NzOksgKWrb8uaqQNonOCG2
t/Ck7jKLeYn7Eu/Omln1FQDAf2jA1ZjffbSyxAJ9OlGSbENPAoIBAQDD67YUuls8
z+OMzKZ6lgJSWDDVKcg59bEBk6pCL09zQ4C4KE3wL0eq9cf7yctZFZHES+kebDUp
QS9K3kX0vpyIMtteykLDqjBq0dYYbExos/7i+sCIf0dmqPaW9cyJoEZAUYdMFuvx
vofQP4lm0hSRc+ijXjEXzS69ZeD/NzxABCPrRCppCYbKRa3IfitEh2xjIArHniW0
Wpur5iHRnCHxiNMtKE4wezGPQWJlslTrs2cVt5S5hkyKxfHBsPyrVSbZBFlF9vBI
mLFy4vxkfb3xVjQR6Ezt97aeG71VyC5yvZ1K16TznvzDGwELcd/Fycjaj5vNcp+U
89ZJdXk7xnNhAoIBAFdssseD29n1+uTlHXOOxauDMpiwZ+tMaPcclpf2Dwp1QzvM
gHc6ultBydN5x7mRJWNh3rWBEOeMr++xWMQrzOW7YsZI+ET+bTrAYy+P0or/D7Kh
5V6EXGQtXUQ+P5NFhlPuYq9FpmBl6Q0qdfz99P3ARtgmvd9c+t+LiZeAGXq+tGk7
2dLuGQ9HwRvWV8xF/RHtT8+xvLw9h4algmC1NluvlGneW4+5ApeHNhE0zqF0wHIg
NH683EDs8Je7jCF94jRNRZjKZnddWxK8Mu7iFj7dDdIRAYcgPy1Z2/b9bSBXtZLY
q0Yhm3nu7A0JYk/bouGOi+mkM5hLO8MVGLMvwd0CggEALAzAKJLp1pdrMwoEWEWI
ChmYCSVWxmlOPeuEeVMHywOfWkh9lYYb1/1g1GS/mqz11Cu5I0TzAu6MAopNMkT1
Ds5YckyJjFKkhi/dsioPV+84XLJCPa5YUGWm47QqI7tscCOkhuAUdor/IDxY2Uxc
oYNtB+YypYZVfvH8D4XMvxvvM4NlAa7JporaEt0DP2ovXW4j3lPZaF6C57hbXDR9
kT/RMzL/uXjJYMszo2fgHgp9H+3hu4DNjtoIjCMN/Dut+1c19zwZNElYhFsyoil/
XlaiaHBRc6OhZJUaEcJrZxLo3Z30kW3qqLdWmcslo+PFjBaD0kJ2TNgyEtwdwOnS
oQKCAQAGRfuTC7qMzQTtlW6NDePBCwRyGSp69N93aDw6e63vVRrKq9GEkZT+6Tpw
lIOKco3oFLRhdtSsHeFdmKqQ9lldkIs0KiBLB1yIdBcxppNCqRDV5xutTcurbmhr
8spVlr8XSgYQED97JaCVB5Df7VDQOIJDeiywq76y1+mNmcUCFcBNNkBFUiq1K5U7
NNRPmoLBfXcn+fipZWNrH8PVkW7y5jRAFpbxh5sWJ4WPXa4SDnXL4DceAduUoJWV
1HaKcVoKgY0UuE3zaak6s9YA85ZjSGgwexJJCteDMIm6QuGAAqamGryIRRo8Q6ML
jBviIlTtD+bygHVmSs4oZTtJMwn1
-----END PRIVATE KEY-----

28
tests/testdata/registry/config.yml vendored Normal file

@@ -0,0 +1,28 @@
version: 0.1
log:
  fields:
    service: registry
storage:
  filesystem:
    rootdirectory: /var/lib/registry
  delete:
    enabled: true
http:
  addr: :5000
  # TLS configuration
  tls:
    certificate: /etc/docker/registry/ssl/registry/tls.crt
    key: /etc/docker/registry/ssl/registry/tls.key
  # Health endpoint
  headers:
    X-Content-Type-Options: [nosniff]
# Using mirror registry to avoid pushing to it in the test
proxy:
  remoteurl: https://registry-1.docker.io
  ttl: 0

8
tests/testdata/registry/registries.yaml vendored Normal file

@@ -0,0 +1,8 @@
mirrors:
  docker.io:
    endpoint:
      - "https://private-registry:5000"
configs:
  "private-registry:5000":
    tls:
      ca_file: /etc/rancher/k3s/tls/ca.crt

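Taken together, config.yml above serves a pull-through cache of docker.io over TLS on port 5000, and registries.yaml points k3s at it, trusting the test CA for that endpoint. A minimal sketch (not part of this change) of how the TLS wiring could be verified; the service DNS name "private-registry" and port come from the manifests below, and this assumes it runs somewhere that name resolves (for example, in-cluster):

```go
// Sketch: confirm the private registry answers over TLS using the test CA.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// CA path as mounted for k3s in registries.yaml above.
	caPEM, err := os.ReadFile("/etc/rancher/k3s/tls/ca.crt")
	if err != nil {
		panic(err)
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		panic("no CA certificates parsed")
	}

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
	}}

	// GET /v2/ is the registry API version check endpoint; a 200 means
	// both the TLS handshake and the registry process are healthy.
	resp, err := client.Get("https://private-registry:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("registry:", resp.Status)
}
```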
tests/tests_suite_test.go

@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"testing"
"time"
@@ -22,16 +23,21 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -42,6 +48,9 @@ import (
const (
k3kNamespace = "k3k-system"
k3sVersion = "v1.35.2-k3s1"
k3sOldVersion = "v1.35.0-k3s1"
e2eTestLabel = "e2e"
slowTestsLabel = "slow"
updateTestsLabel = "update"
@@ -49,6 +58,13 @@ const (
networkingTestsLabel = "networking"
statusTestsLabel = "status"
certificatesTestsLabel = "certificates"
registryTestsLabel = "registry"
registryImage = "registry:2"
registryCACertSecretName = "private-registry-ca-cert"
registryCertSecretName = "private-registry-cert"
registryConfigSecretName = "private-registry-config"
k3sRegistryConfigSecretName = "k3s-registry-config"
)
func TestTests(t *testing.T) {
@@ -63,7 +79,6 @@ var (
	k8s *kubernetes.Clientset
	k8sClient client.Client
	kubeconfigPath string
	repo string
	helmActionConfig *action.Configuration
)
@@ -72,17 +87,17 @@ var _ = BeforeSuite(func() {
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))
repo = os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
if dockerInstallEnabled {
installK3SDocker(ctx)
repo := os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
initKubernetesClient(ctx)
installK3kChart()
installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
} else {
initKubernetesClient(ctx)
}
@@ -96,6 +111,11 @@ func initKubernetesClient(ctx context.Context) {
		kubeconfig []byte
	)

	logger, err := zap.NewDevelopment()
	Expect(err).NotTo(HaveOccurred())

	log.SetLogger(zapr.NewLogger(logger))

	kubeconfigPath := os.Getenv("KUBECONFIG")
	Expect(kubeconfigPath).To(Not(BeEmpty()))
@@ -114,17 +134,12 @@ func initKubernetesClient(ctx context.Context) {
	scheme := buildScheme()

	k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
	Expect(err).NotTo(HaveOccurred())

	logger, err := zap.NewDevelopment()
	Expect(err).NotTo(HaveOccurred())

	log.SetLogger(zapr.NewLogger(logger))
}
func buildScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

	err := v1.AddToScheme(scheme)
	err := clientgoscheme.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())

	err = v1beta1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
@@ -132,7 +147,7 @@ func buildScheme() *runtime.Scheme {
	return scheme
}
func installK3SDocker(ctx context.Context) {
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
	var (
		err error
		kubeconfig []byte
@@ -140,7 +155,7 @@ func installK3SDocker(ctx context.Context) {
	k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
	if k3sHostVersion == "" {
		k3sHostVersion = "v1.32.1+k3s1"
		k3sHostVersion = k3sVersion
	}

	k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
@@ -164,16 +179,15 @@ func installK3SDocker(ctx context.Context) {
	Expect(tmpFile.Close()).To(Succeed())
	kubeconfigPath = tmpFile.Name()

	err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
	err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
	Expect(err).To(Not(HaveOccurred()))

	DeferCleanup(os.Remove, kubeconfigPath)
	Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())

	GinkgoWriter.Print(kubeconfigPath)
	GinkgoWriter.Print(string(kubeconfig))
	GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
}
func installK3kChart() {
func installK3kChart(controllerImage, kubeletImage string) {
	pwd, err := os.Getwd()
	Expect(err).To(Not(HaveOccurred()))
@@ -189,7 +203,7 @@ func installK3kChart() {
	Expect(err).To(Not(HaveOccurred()))

	err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
		GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
		GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
	})

	Expect(err).To(Not(HaveOccurred()))
@@ -201,9 +215,17 @@ func installK3kChart() {
	iCli.Wait = true

	controllerMap, _ := k3kChart.Values["controller"].(map[string]any)

	extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
	extraEnvArray = append(extraEnvArray, map[string]any{
		"name": "DEBUG",
		"value": "true",
	})
	controllerMap["extraEnv"] = extraEnvArray

	imageMap, _ := controllerMap["image"].(map[string]any)
	maps.Copy(imageMap, map[string]any{
		"repository": repo + "/k3k",
		"repository": controllerImage,
		"tag": "dev",
		"pullPolicy": "IfNotPresent",
	})
@@ -212,14 +234,14 @@ func installK3kChart() {
	sharedAgentMap, _ := agentMap["shared"].(map[string]any)

	sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
	maps.Copy(sharedAgentImageMap, map[string]any{
		"repository": repo + "/k3k-kubelet",
		"repository": kubeletImage,
		"tag": "dev",
	})

	release, err := iCli.Run(k3kChart, k3kChart.Values)
	Expect(err).To(Not(HaveOccurred()))

	GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
	GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
}
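The chart-values plumbing above deliberately ignores type-assertion failures (`controllerMap, _ := ...`), which is fine while the chart layout is known. A checked helper along these lines would fail loudly if the values shape ever drifts; this is a sketch, not part of this PR, and `setNestedValue` is a hypothetical name (assumes `fmt` is imported):

```go
// setNestedValue walks a Helm values tree map-by-map and sets the leaf key,
// returning an error instead of silently writing into a missing map.
func setNestedValue(values map[string]any, value any, keys ...string) error {
	if len(keys) == 0 {
		return fmt.Errorf("no keys given")
	}

	m := values
	for _, k := range keys[:len(keys)-1] {
		next, ok := m[k].(map[string]any)
		if !ok {
			return fmt.Errorf("values key %q is not a map", k)
		}
		m = next
	}

	m[keys[len(keys)-1]] = value

	return nil
}
```

For example, `setNestedValue(k3kChart.Values, controllerImage, "controller", "image", "repository")` would replace the `maps.Copy` call for that one field.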
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
@@ -282,36 +304,28 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
	_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
	Expect(err).To(Not(HaveOccurred()))

	Eventually(func() bool {
	Eventually(func(g Gomega) {
		GinkgoWriter.Println("Checking K3k deployment status")

		dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
		Expect(err).To(Not(HaveOccurred()))
		g.Expect(err).To(Not(HaveOccurred()))
		g.Expect(dep.Generation).To(Equal(dep.Status.ObservedGeneration))

		// 1. Check if the controller has observed the latest generation
		if dep.Generation > dep.Status.ObservedGeneration {
			GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
			return false
		var availableCond appsv1.DeploymentCondition

		for _, cond := range dep.Status.Conditions {
			if cond.Type == appsv1.DeploymentAvailable {
				availableCond = cond
				break
			}
		}

		// 2. Check if all replicas have been updated
		if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
			GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
			return false
		}

		// 3. Check if all updated replicas are available
		if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
			GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
			return false
		}

		return true
		g.Expect(availableCond.Type).To(Equal(appsv1.DeploymentAvailable))
		g.Expect(availableCond.Status).To(Equal(v1.ConditionTrue))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second).
		WithTimeout(time.Second * 30).
		Should(BeTrue())
		Should(Succeed())
}
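The rewritten wait loop above switches from a bool-returning poll to Gomega's assertion-based form. The semantic difference: assertions made on the `g Gomega` argument fail only that attempt, so `Eventually` keeps polling, while `MustPassRepeatedly(5)` additionally requires five consecutive passing attempts before the wait succeeds. A stripped-down sketch of the same pattern (a hypothetical helper, not in the diff):

```go
// waitForAvailable polls until the named deployment reports at least one
// available replica, and requires the check to hold on five consecutive
// attempts before succeeding.
func waitForAvailable(ctx context.Context, clientset *kubernetes.Clientset, ns, name string) {
	Eventually(func(g Gomega) {
		dep, err := clientset.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
		g.Expect(err).NotTo(HaveOccurred()) // fails this attempt only; Eventually retries
		g.Expect(dep.Status.AvailableReplicas).To(BeNumerically(">=", 1))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second).
		WithTimeout(30 * time.Second).
		Should(Succeed())
}
```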
var _ = AfterSuite(func() {
@@ -324,6 +338,7 @@ var _ = AfterSuite(func() {
	}

	dumpK3kCoverageData(ctx, goCoverDir)

	if k3sContainer != nil {
		// dump k3s logs
		k3sLogs, err := k3sContainer.Logs(ctx)
@@ -533,3 +548,227 @@ func caCertSecret(name, namespace string, crt, key []byte) *v1.Secret {
		},
	}
}
func privateRegistry(ctx context.Context, namespace string) error {
	caCrtMap := map[string]string{
		"tls.crt": filepath.Join("testdata", "registry", "certs", "ca.crt"),
		"tls.key": filepath.Join("testdata", "registry", "certs", "ca.key"),
	}

	caSecret, err := buildRegistryConfigSecret(caCrtMap, namespace, registryCACertSecretName, true)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, caSecret); err != nil {
		return err
	}

	registryCrtMap := map[string]string{
		"tls.crt": filepath.Join("testdata", "registry", "certs", "registry.crt"),
		"tls.key": filepath.Join("testdata", "registry", "certs", "registry.key"),
	}

	registrySecret, err := buildRegistryConfigSecret(registryCrtMap, namespace, registryCertSecretName, true)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, registrySecret); err != nil {
		return err
	}

	configMap := map[string]string{
		"config.yml": filepath.Join("testdata", "registry", "config.yml"),
	}

	configSecret, err := buildRegistryConfigSecret(configMap, namespace, registryConfigSecretName, false)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, configSecret); err != nil {
		return err
	}

	k3sRegistryConfig := map[string]string{
		"registries.yaml": filepath.Join("testdata", "registry", "registries.yaml"),
	}

	k3sRegistrySecret, err := buildRegistryConfigSecret(k3sRegistryConfig, namespace, k3sRegistryConfigSecretName, false)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, k3sRegistrySecret); err != nil {
		return err
	}

	registryDeployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry",
			Namespace: namespace,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: ptr.To(int32(1)),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "private-registry",
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "private-registry",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "private-registry",
							Image: registryImage,
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "config",
									MountPath: "/etc/docker/registry/",
								},
								{
									Name:      "ca-cert",
									MountPath: "/etc/docker/registry/ssl/ca",
								},
								{
									Name:      "registry-cert",
									MountPath: "/etc/docker/registry/ssl/registry",
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: "config",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: "private-registry-config",
								},
							},
						},
						{
							Name: "ca-cert",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: "private-registry-ca-cert",
								},
							},
						},
						{
							Name: "registry-cert",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: "private-registry-cert",
								},
							},
						},
					},
				},
			},
		},
	}

	if err := k8sClient.Create(ctx, registryDeployment); err != nil {
		return err
	}

	registryService := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry",
			Namespace: namespace,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{
				"app": "private-registry",
			},
			Ports: []v1.ServicePort{
				{
					Name:       "registry-port",
					Port:       5000,
					TargetPort: intstr.FromInt(5000),
				},
			},
		},
	}

	return k8sClient.Create(ctx, registryService)
}
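privateRegistry stands up the whole fixture: four secrets (the CA pair, the serving pair, the registry config, and the k3s mirror config), the registry Deployment with those secrets mounted where config.yml expects them, and a ClusterIP Service on port 5000. A hypothetical call site, sketched here rather than taken from the diff (the spec text and namespace are invented):

```go
var _ = It("pulls images through the private registry", Label(registryTestsLabel), func() {
	ns := "registry-e2e"

	Expect(privateRegistry(ctx, ns)).To(Succeed())
	// Restrict the virtual cluster's server pods to in-cluster egress
	// (10.0.0.0/8), so image pulls cannot reach the internet directly and
	// must go through the mirror instead.
	Expect(buildRegistryNetPolicy(ctx, ns)).To(Succeed())
})
```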
func buildRegistryNetPolicy(ctx context.Context, namespace string) error {
	np := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry-test-netpol",
			Namespace: namespace,
		},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"role": "server",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{
				networkingv1.PolicyTypeEgress,
			},
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					To: []networkingv1.NetworkPolicyPeer{
						{
							IPBlock: &networkingv1.IPBlock{
								CIDR: "10.0.0.0/8",
							},
						},
					},
				},
			},
		},
	}

	return k8sClient.Create(ctx, &np)
}
func buildRegistryConfigSecret(tlsMap map[string]string, namespace, name string, tlsSecret bool) (*v1.Secret, error) {
	secretType := v1.SecretTypeOpaque
	if tlsSecret {
		secretType = v1.SecretTypeTLS
	}

	data := make(map[string][]byte)

	for key, path := range tlsMap {
		b, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}

		data[key] = b
	}

	secret := &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Type: secretType,
		Data: data,
	}

	return secret, nil
}