Compare commits


16 Commits

Author SHA1 Message Date
renovate-rancher[bot]
1d5943d054 chore(deps): update github actions 2026-04-09 07:01:18 +00:00
renovate-rancher[bot]
d9c504283e chore(deps): update rancher/renovate-config digest to 20f34a3 (#749)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-04-08 23:07:22 +02:00
Enrico Candino
4738ab2c56 Schedule Pods in the same Node with a preferred affinity (#724) (#736)
* Add integration tests for Deployment and StatefulSet creation with PVC in shared cluster

* Add affinity settings for pod scheduling based on agent hostname

* increased timeout

* focus test

* remove cleanup

* check for existing pvc

* remove focus

* add affinity tests for Pods in shared cluster

* refactor restartServerPod to improve pod restart checks and timeout handling

* unfocus

* fix test description
2026-04-02 22:48:33 +02:00
Jonathan Crowther
7c1292b262 Pin installed versions to specific hashes (#730) (#732) 2026-03-27 12:21:20 -04:00
Jonathan Crowther
3038276b02 [v1.0] Refactor tests to their own directories (#725)
* Refactor tests to their own directories (#723)

* Move cli tests

* Move e2e tests to their own directory

* Move integration tests

* Fix path within the cli tests

* Move k3k-kubelet tests

* Improve the various make test- options

* Remove dead code from cli tests

* Update development.md with the new make commands

* Remove tests that didn't exist in v1.0
2026-03-26 11:38:51 -04:00
Enrico Candino
6da59f16b9 update golang grpc module (#712) (#713) 2026-03-20 00:31:23 +01:00
Jonathan Crowther
118ed838a7 Backport macOS script compatibility changes (#709) 2026-03-18 14:52:35 -04:00
Enrico Candino
fe924d3b89 Bump some test dependencies and fix lint (#708) (#710) 2026-03-18 19:11:13 +01:00
Enrico Candino
831821206e chore(deps): update github actions (#707)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 12:26:20 +01:00
Enrico Candino
16f7a71861 chore(deps): update dependency go to v1.25.8 (#664) (#706)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 12:16:13 +01:00
Hussein Galal
b055aeff0f Refactor distribution algorithm to account for host capacity (#695)
* Refactor distribution algorithm to account for host capacity

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-03-12 21:28:18 +02:00
Enrico Candino
5ef99f83ee refactor: streamline K3S Docker installation and chart setup with dynamic repository handling (#694) 2026-03-12 11:11:17 +01:00
Hussein Galal
f55dd45775 use apireader instead of client for node registration in mirror host node (#687)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-03-10 17:41:02 +02:00
Enrico Candino
f2badbfdb1 Added policy in Cluster Status (#663) (#677)
* initial implementation

restored policyName

* added test, fixed priority scheduling

* requested changes from review

- wrapped errors
- fixed some kube-api-linter issues to match k8s conventions
- moved policy namespace check in the same condition branch
2026-02-18 11:31:29 +01:00
Andreas Kupries
e87d879a91 Merge pull request #675 from andreas-kupries/syncer-controller-owner-v1.0-backport
[v1.0 backport] change ControllerReferences over to OwnerReferences
2026-02-17 11:56:54 +01:00
Andreas Kupries
502f4389d2 fix: switch ControllerReferences over to OwnerReferences 2026-02-17 11:11:23 +01:00
88 changed files with 975 additions and 1058 deletions


@@ -19,18 +19,18 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
with:
distribution: goreleaser
version: v2
@@ -50,7 +50,7 @@ jobs:
output: 'trivy-results-k3kcli.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3kcli)
uses: github/codeql-action/upload-sarif@603b797f8b14b413fe025cd935a91c16c4782713 # v3
uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
with:
sarif_file: trivy-results-k3kcli.sarif
category: k3kcli
@@ -66,7 +66,7 @@ jobs:
output: 'trivy-results-k3k.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k)
uses: github/codeql-action/upload-sarif@603b797f8b14b413fe025cd935a91c16c4782713 # v3
uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
with:
sarif_file: trivy-results-k3k.sarif
category: k3k
@@ -82,7 +82,7 @@ jobs:
output: 'trivy-results-k3k-kubelet.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet)
uses: github/codeql-action/upload-sarif@603b797f8b14b413fe025cd935a91c16c4782713 # v3
uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
with:
sarif_file: trivy-results-k3k-kubelet.sarif
category: k3k-kubelet


@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0


@@ -20,13 +20,13 @@ jobs:
# The FOSSA token is shared between all repos in Rancher's GH org. It can be
# used directly and there is no need to request specific access to EIO.
- name: Read FOSSA token
uses: rancher-eio/read-vault-secrets@main
uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
with:
secrets: |
secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY
- name: FOSSA scan
uses: fossas/fossa-action@main
uses: fossas/fossa-action@c414b9ad82eaad041e47a7cf62a4f02411f427a0 # v1.8.0
with:
api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
# Only runs the scan and do not provide/returns any results back to the


@@ -24,7 +24,7 @@ jobs:
run: echo "::error::Missing tag from input" && exit 1
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Check if release is draft
run: |


@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
@@ -31,22 +31,22 @@ jobs:
run: git checkout ${{ inputs.commit }}
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
with:
image: tonistiigi/binfmt:qemu-v10.0.4-56
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
version: v0.30.1
- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
if: github.repository_owner == 'rancher'
with:
secrets: |
@@ -62,7 +62,7 @@ jobs:
echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV
- name: Login to container registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.DOCKER_USERNAME }}
@@ -85,7 +85,7 @@ jobs:
echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
with:
distribution: goreleaser
version: v2


@@ -51,7 +51,7 @@ permissions:
jobs:
call-workflow:
uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@release
uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@20f34a3e3d54ab17f4fd5a3037edd62f58e26c7a # release
with:
configMigration: ${{ inputs.configMigration || 'true' }}
logLevel: ${{ inputs.logLevel || 'info' }}


@@ -8,6 +8,10 @@ on:
permissions:
contents: read
env:
ARCH: amd64
KUBECTL_VERSION: v1.35.3
jobs:
conformance:
runs-on: ubuntu-latest
@@ -18,15 +22,17 @@ jobs:
type:
- parallel
- serial
env:
K3D_VERSION: v5.8.3
K3D_BIN_HASH_AMD64: "dbaa79a76ace7f4ca230a1ff41dc7d8a5036a8ad0309e9c54f9bf3836dbe853e"
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -34,14 +40,18 @@ jobs:
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
- name: Install k3d # taken from github.com/rancher/rancher/.github/workflows/integration-tests.yaml
run: |
curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-${ARCH}"
echo "${K3D_BIN_HASH_AMD64} k3d" | shasum -a 256 --check
sudo mv k3d /usr/local/bin
sudo chmod +x /usr/local/bin/k3d
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
@@ -131,7 +141,7 @@ jobs:
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs


@@ -21,12 +21,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -34,7 +34,7 @@ jobs:
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
- name: Install k3s
env:
@@ -104,21 +104,21 @@ jobs:
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive K3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: k3s-${{ matrix.type }}-logs
path: /tmp/k3s.log
- name: Archive K3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: k3k-${{ matrix.type }}-logs
path: /tmp/k3k.log
- name: Archive conformance logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs


@@ -16,17 +16,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
- name: Setup environment
run: |
@@ -64,28 +64,28 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3k-logs
@@ -95,17 +95,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
- name: Setup environment
run: |
@@ -143,28 +143,28 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3k-logs


@@ -16,9 +16,9 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -26,7 +26,7 @@ jobs:
run: make test-unit
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
@@ -37,17 +37,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
- name: Setup environment
run: |
@@ -78,21 +78,21 @@ jobs:
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{ github.workspace }}/covdata/cover.out
flags: cli
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: cli-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: cli-k3k-logs


@@ -15,10 +15,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
cache: true


@@ -60,24 +60,32 @@ test: ## Run all the tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)
.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
test-unit: ## Run the unit tests (skips the e2e and integration tests)
$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*
.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) $(GINKGO_FLAGS) pkg/controller
.PHONY: test-kubelet
test-kubelet: ## Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
$(GINKGO) $(GINKGO_FLAGS) tests/integration/k3k-kubelet
.PHONY: test-kubelet-controller
test-kubelet-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller
.PHONY: test-policy
test-policy: ## Run the policy controller tests (tests/integration/policy)
$(GINKGO) $(GINKGO_FLAGS) tests/integration/policy
.PHONY: test-cluster
test-cluster: ## Run the cluster controller tests (tests/integration/cluster)
$(GINKGO) $(GINKGO_FLAGS) tests/integration/cluster
.PHONY: test-integration
test-integration: ## Run the controller tests that use envtest (tests/integration)
$(GINKGO) $(GINKGO_FLAGS) tests/integration
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests/e2e
.PHONY: test-cli
test-cli: ## Run the cli tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
$(GINKGO) $(GINKGO_FLAGS) --flake-attempts=3 tests/cli
.PHONY: generate
generate: ## Generate the CRDs specs


@@ -773,25 +773,6 @@ spec:
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s server certificate.
@@ -927,141 +908,6 @@ spec:
priorityClass:
description: priorityClass is the priority class enforced by the active VirtualClusterPolicy.
type: string
sync:
description: sync is the SyncConfig enforced by the active VirtualClusterPolicy.
properties:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
enabled: false
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
enabled: true
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
enabled: false
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
enabled: true
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
type: object
services:
default:
enabled: true
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
required:
- name
type: object


@@ -322,25 +322,6 @@ spec:
required:
- enabled
type: object
storageClasses:
default:
enabled: false
description: StorageClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
additionalProperties:
type: string
description: |-
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
type: object
status:


@@ -61,7 +61,6 @@ _Appears In:_
| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. + | |
|===
@@ -218,7 +217,7 @@ Each entry defines a secret and its mount path within the pods. + | |
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
ConfigMapSyncConfig specifies the sync options for services.
@@ -353,7 +352,7 @@ _Appears In:_
IngressSyncConfig specifies the sync options for Ingresses.
IngressSyncConfig specifies the sync options for services.
@@ -464,7 +463,7 @@ _Appears In:_
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
PersistentVolumeClaimSyncConfig specifies the sync options for services.
@@ -502,7 +501,7 @@ _Appears In:_
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
PriorityClassSyncConfig specifies the sync options for services.
@@ -569,7 +568,7 @@ This can be 'server', 'agent', or 'all' (for both). + | | Enum: [server agent a
SecretSyncConfig specifies the sync options for Secrets.
SecretSyncConfig specifies the sync options for services.
@@ -591,7 +590,7 @@ then all resources of the given type will be synced. + | |
ServiceSyncConfig specifies the sync options for Services.
ServiceSyncConfig specifies the sync options for services.
@@ -608,28 +607,6 @@ then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig"]
=== StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
=== SyncConfig
@@ -641,7 +618,6 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
@@ -654,7 +630,6 @@ _Appears In:_
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
| *`storageClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig[$$StorageClassSyncConfig$$]__ | StorageClasses resources sync configuration. + | { enabled:false } |
|===


@@ -48,7 +48,6 @@ _Appears in:_
| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
| `sync` _[SyncConfig](#syncconfig)_ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. | | |
#### Cluster
@@ -163,7 +162,7 @@ _Appears in:_
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
ConfigMapSyncConfig specifies the sync options for services.
@@ -271,7 +270,7 @@ _Appears in:_
IngressSyncConfig specifies the sync options for Ingresses.
IngressSyncConfig specifies the sync options for services.
@@ -353,7 +352,7 @@ _Appears in:_
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
PersistentVolumeClaimSyncConfig specifies the sync options for services.
@@ -384,7 +383,7 @@ _Appears in:_
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
PriorityClassSyncConfig specifies the sync options for services.
@@ -424,7 +423,7 @@ _Appears in:_
SecretSyncConfig specifies the sync options for Secrets.
SecretSyncConfig specifies the sync options for services.
@@ -441,7 +440,7 @@ _Appears in:_
ServiceSyncConfig specifies the sync options for Services.
ServiceSyncConfig specifies the sync options for services.
@@ -454,23 +453,6 @@ _Appears in:_
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SyncConfig
@@ -480,7 +462,6 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears in:_
- [AppliedPolicy](#appliedpolicy)
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
@@ -492,7 +473,6 @@ _Appears in:_
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
| `storageClasses` _[StorageClassSyncConfig](#storageclasssyncconfig)_ | StorageClasses resources sync configuration. | \{ enabled:false \} | |
#### VirtualClusterPolicy


@@ -51,9 +51,11 @@ To see all the available Make commands you can run `make help`, i.e:
package Package the k3k and k3k-kubelet Docker images
push Push the K3k images to the registry
test Run all the tests
test-unit Run the unit tests (skips the e2e)
test-controller Run the controller tests (pkg/controller)
test-kubelet-controller Run the controller tests (pkg/controller)
test-unit Run the unit tests (skips the e2e and integration tests)
test-kubelet Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
test-policy Run the policy controller tests (tests/integration/policy)
test-cluster Run the cluster controller tests (tests/integration/cluster)
test-integration Run the controller tests (pkg/controller)
test-e2e Run the e2e tests
test-cli Run the cli tests
generate Generate the CRDs specs

go.mod

@@ -9,8 +9,8 @@ require (
github.com/go-logr/logr v1.4.3
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.28.1
github.com/onsi/gomega v1.39.1
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.4
github.com/spf13/cobra v1.10.2
@@ -47,7 +47,7 @@ require (
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -98,7 +98,7 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.23.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
@@ -187,7 +187,6 @@ require (
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect

go.sum

@@ -18,8 +18,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -128,12 +128,6 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
@@ -163,8 +157,6 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -205,8 +197,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -249,8 +241,6 @@ github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -280,8 +270,6 @@ github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8S
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -294,8 +282,6 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -335,10 +321,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -430,14 +416,6 @@ github.com/testcontainers/testcontainers-go v0.41.0 h1:mfpsD0D36YgkxGj2LrIyxuwQ9
github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0 h1:xvnllztzVajAMUmeb1BD6XQNacoY4gNEN6Gl1fhzUMI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0/go.mod h1:y9YF71J/D1tIoIY09dmtwEXPiHmuvntbK+MWuypq8OQ=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=


@@ -131,8 +131,19 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
}
}
var currentHostPVC v1.PersistentVolumeClaim
err := r.HostClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(syncedPVC), &currentHostPVC)
if err == nil {
log.V(1).Info("persistent volume claim already exist in the host cluster")
}
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// create the pvc on host
log.Info("creating the persistent volume claim for the first time on the host cluster")
log.Info("creating the persistent volume claim for the first time in the host cluster")
// note that we don't need to update the PVC on the host cluster; we only sync the PVC so it can be
// handled by the host cluster.


@@ -270,7 +270,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
err = provider.ConfigureNode(
provider.ConfigureNode(
k.logger,
pc.Node,
cfg.AgentHostname,
@@ -283,7 +283,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
cfg.MirrorHostNodes,
)
return utilProvider, &provider.Node{}, err
return utilProvider, &provider.Node{}, nil
}
}
@@ -305,9 +305,6 @@ func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP strin
c.TLSConfig = tlsConfig
c.NodeSpec.Labels["kubernetes.io/role"] = "worker"
c.NodeSpec.Labels["node-role.kubernetes.io/worker"] = "true"
return nil
}
}


@@ -14,14 +14,13 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) error {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
var hostNode corev1.Node
if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
logger.Error(err, "error getting host node for mirroring", "node", node.Name)
return err
logger.Error(err, "error getting host node for mirroring")
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -52,8 +51,6 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
}
return nil
}
// nodeConditions returns the basic conditions which mark the node as ready


@@ -398,8 +398,27 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
logger = logger.WithValues("pod", hostPod.Name)
// Schedule the host pod in the same host node of the virtual kubelet
hostPod.Spec.NodeName = p.agentHostname
// Clear the NodeName to allow scheduling, and set affinity to prefer scheduling the Pod on the same host node as the virtual kubelet,
// unless the user has specified their own affinity, in which case the user's affinity is respected.
hostPod.Spec.NodeName = ""
if hostPod.Spec.Affinity == nil {
hostPod.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
Weight: 100,
Preference: corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{{
Key: "kubernetes.io/hostname",
Operator: corev1.NodeSelectorOpIn,
Values: []string{p.agentHostname},
}},
},
}},
},
}
}
// The pod's own nodeSelector is ignored.
// The final selector is determined by the cluster spec, but overridden by a policy if present.


@@ -249,14 +249,9 @@ type SyncConfig struct {
// +kubebuilder:default={"enabled": false}
// +optional
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
// StorageClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
// +optional
StorageClasses StorageClassSyncConfig `json:"storageClasses"`
}
// SecretSyncConfig specifies the sync options for Secrets.
// SecretSyncConfig specifies the sync options for services.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -271,7 +266,7 @@ type SecretSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ServiceSyncConfig specifies the sync options for Services.
// ServiceSyncConfig specifies the sync options for services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -286,7 +281,7 @@ type ServiceSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ConfigMapSyncConfig specifies the sync options for ConfigMaps.
// ConfigMapSyncConfig specifies the sync options for services.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -301,7 +296,7 @@ type ConfigMapSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// IngressSyncConfig specifies the sync options for Ingresses.
// IngressSyncConfig specifies the sync options for services.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -316,7 +311,7 @@ type IngressSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
// PersistentVolumeClaimSyncConfig specifies the sync options for services.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -331,7 +326,7 @@ type PersistentVolumeClaimSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// PriorityClassSyncConfig specifies the sync options for PriorityClasses.
// PriorityClassSyncConfig specifies the sync options for services.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -346,21 +341,6 @@ type PriorityClassSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// StorageClassSyncConfig specifies the sync options for StorageClasses.
type StorageClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies set of labels of the resources that will be synced, if empty
// then all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ClusterMode is the possible provisioning mode of a Cluster.
//
// +kubebuilder:validation:Enum=shared;virtual
@@ -604,11 +584,6 @@ type AppliedPolicy struct {
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// sync is the SyncConfig enforced by the active VirtualClusterPolicy.
//
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
}
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.


@@ -40,11 +40,6 @@ func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
(*out)[key] = val
}
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
@@ -583,28 +578,6 @@ func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClassSyncConfig) DeepCopyInto(out *StorageClassSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSyncConfig.
func (in *StorageClassSyncConfig) DeepCopy() *StorageClassSyncConfig {
if in == nil {
return nil
}
out := new(StorageClassSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
*out = *in
@@ -614,7 +587,6 @@ func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
in.Ingresses.DeepCopyInto(&out.Ingresses)
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
in.StorageClasses.DeepCopyInto(&out.StorageClasses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.


@@ -11,7 +11,6 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
@@ -29,7 +28,6 @@ import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -49,18 +47,11 @@ const (
clusterFinalizerName = "cluster.k3k.io/finalizer"
ClusterInvalidName = "system"
SyncEnabledLabelKey = "k3k.io/sync-enabled"
SyncSourceLabelKey = "k3k.io/sync-source"
SyncSourceHostLabel = "host"
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
memberRemovalTimeout = time.Minute * 1
storageClassEnabledIndexField = "spec.sync.storageClasses.enabled"
storageClassStatusEnabledIndexField = "status.policy.sync.storageClasses.enabled"
)
var (
@@ -124,82 +115,15 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
},
}
// index the 'spec.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Spec.Sync != nil && vc.Spec.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
// index the 'status.policy.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassStatusEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Status.Policy != nil && vc.Status.Policy.Sync != nil && vc.Status.Policy.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Watches(&storagev1.StorageClass{},
handler.EnqueueRequestsFromMapFunc(reconciler.mapStorageClassToCluster),
).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (r *ClusterReconciler) mapStorageClassToCluster(ctx context.Context, obj client.Object) []reconcile.Request {
log := ctrl.LoggerFrom(ctx)
if _, ok := obj.(*storagev1.StorageClass); !ok {
return nil
}
// Merge and deduplicate clusters
allClusters := make(map[types.NamespacedName]struct{})
var specClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &specClusterList, client.MatchingFields{storageClassEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with spec sync enabled for storageclass sync")
} else {
for _, cluster := range specClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
var statusClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &statusClusterList, client.MatchingFields{storageClassStatusEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with status sync enabled for storageclass sync")
} else {
for _, cluster := range statusClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
requests := make([]reconcile.Request, 0, len(allClusters))
for key := range allClusters {
requests = append(requests, reconcile.Request{NamespacedName: key})
}
return requests
}
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
return handler.Funcs{
// We don't need to update for create or delete events
@@ -426,22 +350,11 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
return err
}
if err := c.bindClusterRoles(ctx, cluster); err != nil {
return err
}
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
return err
}
// Important: if you need to call the Server API of the Virtual Cluster
// this needs to be done AFTER the kubeconfig has been generated
if err := c.ensureStorageClasses(ctx, cluster); err != nil {
return err
}
return nil
return c.bindClusterRoles(ctx, cluster)
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
@@ -707,120 +620,6 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.
return nil
}
func (c *ClusterReconciler) ensureStorageClasses(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Ensuring cluster StorageClasses")
virtualClient, err := newVirtualClient(ctx, c.Client, cluster.Name, cluster.Namespace)
if err != nil {
return fmt.Errorf("failed creating virtual client: %w", err)
}
appliedSync := cluster.Spec.Sync.DeepCopy()
// If a policy is applied to the virtual cluster we need to use its SyncConfig, if available
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
appliedSync = cluster.Status.Policy.Sync
}
// If storageclass sync is disabled, clean up any managed storage classes.
if appliedSync == nil || !appliedSync.StorageClasses.Enabled {
err := virtualClient.DeleteAllOf(ctx, &storagev1.StorageClass{}, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel})
return client.IgnoreNotFound(err)
}
var hostStorageClasses storagev1.StorageClassList
if err := c.Client.List(ctx, &hostStorageClasses); err != nil {
return fmt.Errorf("failed listing host storageclasses: %w", err)
}
// filter out the StorageClasses with sync disabled, and the ones not matching the selector
filteredHostStorageClasses := make(map[string]storagev1.StorageClass)
for _, sc := range hostStorageClasses.Items {
syncEnabled, found := sc.Labels[SyncEnabledLabelKey]
// if sync is disabled -> continue
if found && syncEnabled != "true" {
log.V(1).Info("sync is disabled", "sc-name", sc.Name)
continue
}
// if selector doesn't match -> continue
// an empty selector matches everything
selector := labels.SelectorFromSet(appliedSync.StorageClasses.Selector)
if !selector.Matches(labels.Set(sc.Labels)) {
log.V(1).Info("selector not matching", "sc-name", sc.Name)
continue
}
log.V(1).Info("keeping storageclass", "sc-name", sc.Name)
filteredHostStorageClasses[sc.Name] = sc
}
var virtStorageClasses storagev1.StorageClassList
if err = virtualClient.List(ctx, &virtStorageClasses, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel}); err != nil {
return fmt.Errorf("failed listing virtual storageclasses: %w", err)
}
// delete StorageClasses with the sync disabled
for _, sc := range virtStorageClasses.Items {
if _, found := filteredHostStorageClasses[sc.Name]; !found {
log.V(1).Info("deleting storageclass", "sc-name", sc.Name)
if errDelete := virtualClient.Delete(ctx, &sc); errDelete != nil {
log.Error(errDelete, "failed to delete virtual storageclass", "name", sc.Name)
err = errors.Join(err, errDelete)
}
}
}
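// create or update the remaining filtered host StorageClasses in the virtual cluster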
for _, hostSc := range filteredHostStorageClasses {
log.V(1).Info("updating storageclass", "sc-name", hostSc.Name)
virtualSc := hostSc.DeepCopy()
virtualSc.ObjectMeta = metav1.ObjectMeta{
Name: hostSc.Name,
Labels: hostSc.Labels,
Annotations: hostSc.Annotations,
}
_, errCreateOrUpdate := controllerutil.CreateOrUpdate(ctx, virtualClient, virtualSc, func() error {
virtualSc.Annotations = hostSc.Annotations
virtualSc.Labels = hostSc.Labels
if len(virtualSc.Labels) == 0 {
virtualSc.Labels = make(map[string]string)
}
virtualSc.Labels[SyncSourceLabelKey] = SyncSourceHostLabel
virtualSc.Provisioner = hostSc.Provisioner
virtualSc.Parameters = hostSc.Parameters
virtualSc.ReclaimPolicy = hostSc.ReclaimPolicy
virtualSc.MountOptions = hostSc.MountOptions
virtualSc.AllowVolumeExpansion = hostSc.AllowVolumeExpansion
virtualSc.VolumeBindingMode = hostSc.VolumeBindingMode
virtualSc.AllowedTopologies = hostSc.AllowedTopologies
return nil
})
if errCreateOrUpdate != nil {
log.Error(errCreateOrUpdate, "failed to create or update virtual storageclass", "name", virtualSc.Name)
err = errors.Join(err, errCreateOrUpdate)
}
}
if err != nil {
return fmt.Errorf("failed to sync storageclasses: %w", err)
}
return nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
@@ -943,6 +742,11 @@ func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.Vi
}
}
// validate sync policy
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
}
return nil
}

View File

@@ -476,7 +476,6 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
Sync: policy.Spec.Sync,
}
if !reflect.DeepEqual(origStatus, &cluster.Status) {

View File

@@ -1,4 +1,4 @@
package k3k_test
package cli_test
import (
"bytes"

tests/cli/common_test.go Normal file
View File

@@ -0,0 +1,55 @@
package cli_test
import (
"context"
"fmt"
"os"
"sync"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func NewNamespace() *v1.Namespace {
GinkgoHelper()
namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
return namespace
}
func DeleteNamespaces(names ...string) {
GinkgoHelper()
if _, found := os.LookupEnv("KEEP_NAMESPACES"); found {
By(fmt.Sprintf("Keeping namespace %v", names))
return
}
wg := sync.WaitGroup{}
wg.Add(len(names))
for _, name := range names {
go func() {
defer wg.Done()
defer GinkgoRecover()
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
}()
}
wg.Wait()
}
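A minimal usage sketch of these helpers (hypothetical spec, not part of the change; it assumes the Ginkgo/Gomega dot-imports and the package-level k8s clientset used throughout this package):
var _ = When("running a CLI test", func() {
var namespace *v1.Namespace
BeforeEach(func() {
// NewNamespace creates a fresh generated-name namespace on the host
namespace = NewNamespace()
// DeleteNamespaces skips cleanup when KEEP_NAMESPACES is set
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
})
It("starts from a generated namespace", func() {
Expect(namespace.Name).To(HavePrefix("ns-"))
})
})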

View File

@@ -0,0 +1,56 @@
package cli_test
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
memory "k8s.io/client-go/discovery/cached"
)
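// RESTClientGetter is a minimal implementation of the RESTClientGetter interface that
// Helm's action.Configuration.Init expects, backed by an in-memory cached discovery
// client so repeated capability lookups don't hit the API server.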
type RESTClientGetter struct {
clientconfig clientcmd.ClientConfig
restConfig *rest.Config
discoveryClient discovery.CachedDiscoveryInterface
}
func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) {
clientconfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
if err != nil {
return nil, err
}
restConfig, err := clientconfig.ClientConfig()
if err != nil {
return nil, err
}
dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, err
}
return &RESTClientGetter{
clientconfig: clientconfig,
restConfig: restConfig,
discoveryClient: memory.NewMemCacheClient(dc),
}, nil
}
func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) {
return r.restConfig, nil
}
func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return r.discoveryClient, nil
}
func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil
}
func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return r.clientconfig
}

View File

@@ -0,0 +1,234 @@
package cli_test
import (
"context"
"io"
"maps"
"os"
"path"
"strings"
"testing"
"time"
"github.com/go-logr/zapr"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/k3s"
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
const (
k3kNamespace = "k3k-system"
k3sVersion = "v1.35.2-k3s1"
k3sOldVersion = "v1.35.0-k3s1"
)
func TestTests(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Tests Suite")
}
var (
k3sContainer *k3s.K3sContainer
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
helmActionConfig *action.Configuration
)
var _ = BeforeSuite(func() {
ctx := context.Background()
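// When K3K_DOCKER_INSTALL is set, the suite provisions a disposable k3s container
// and installs the locally built chart; otherwise it targets the cluster already
// pointed to by KUBECONFIG.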
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
if dockerInstallEnabled {
repo := os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
initKubernetesClient()
installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
} else {
initKubernetesClient()
}
})
func initKubernetesClient() {
var (
err error
kubeconfig []byte
)
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))
kubeconfig, err = os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
scheme := buildScheme()
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
}
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
var (
err error
kubeconfig []byte
)
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
if k3sHostVersion == "" {
k3sHostVersion = k3sVersion
}
k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
Expect(err).To(Not(HaveOccurred()))
containerIP, err := k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Println("K3s containerIP: " + containerIP)
kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(os.Remove, kubeconfigPath)
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
}
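// installK3kChart loads the local chart from the repo and installs it with Helm,
// overriding the controller and kubelet images with the :dev tags loaded above.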
func installK3kChart(controllerImage, kubeletImage string) {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))
k3kChart, err := loader.Load(path.Join(pwd, "../../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))
helmActionConfig = new(action.Configuration)
kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = "k3k"
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
iCli.Wait = true
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
extraEnvArray = append(extraEnvArray, map[string]any{
"name": "DEBUG",
"value": "true",
})
controllerMap["extraEnv"] = extraEnvArray
imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": controllerImage,
"tag": "dev",
"pullPolicy": "IfNotPresent",
})
agentMap, _ := k3kChart.Values["agent"].(map[string]any)
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": kubeletImage,
"tag": "dev",
})
release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
}
var _ = AfterSuite(func() {
ctx := context.Background()
if k3sContainer != nil {
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})
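// writeLogs dumps a container log stream to a file under the system temp directory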
func writeLogs(filename string, logs io.ReadCloser) {
logsBytes, err := io.ReadAll(logs)
Expect(err).To(Not(HaveOccurred()))
tempfile := path.Join(os.TempDir(), filename)
err = os.WriteFile(tempfile, logsBytes, 0o644)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Println("logs written to: " + tempfile)
_ = logs.Close()
}

View File

@@ -1,44 +0,0 @@
package k3k_test
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("creating a shared mode cluster", Label(e2eTestLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("creates nodes with the worker role", func() {
Eventually(func(g Gomega) {
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(GinkgoT().Context(), metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nodes.Items).To(HaveLen(1))
g.Expect(nodes.Items[0].Labels).To(HaveKeyWithValue("node-role.kubernetes.io/worker", "true"))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})

View File

@@ -1,200 +0,0 @@
package k3k_test
import (
"context"
"time"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created in a namespace with a policy", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
vcp *v1beta1.VirtualClusterPolicy
)
BeforeAll(func() {
ctx = context.Background()
// 1. Create StorageClasses in host
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-enabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
var err error
storageClassEnabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-disabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 2. Create VirtualClusterPolicy with StorageClass sync enabled
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vcp-sync-sc-",
},
Spec: v1beta1.VirtualClusterPolicySpec{
Sync: &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: true,
},
},
},
}
err = k8sClient.Create(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
// 3. Create Namespace with policy label
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ns-vcp-",
Labels: map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
},
},
}
// We use the k8s clientset for namespace creation to stay consistent with other tests
ns, err = k8s.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 4. Create VirtualCluster in that namespace
// The cluster doesn't have storage class sync enabled in its spec
clusterObj := NewCluster(ns.Name)
clusterObj.Spec.Sync = &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: false,
},
}
clusterObj.Spec.Expose.NodePort.ServerPort = ptr.To[int32](30000)
CreateCluster(clusterObj)
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(clusterObj)
virtualCluster = &VirtualCluster{
Cluster: clusterObj,
RestConfig: restConfig,
Client: client,
Kubeconfig: kubeconfig,
}
DeferCleanup(func() {
DeleteNamespaces(ns.Name)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8sClient.Delete(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
})
})
It("has the storage classes sync enabled from the policy", func() {
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Status.Policy).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
})
It("will sync host storage classes with the sync enabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
// We only care about the storage classes we created for this test to avoid noise
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
When("disabling the storage class sync in the policy", Ordered, func() {
BeforeAll(func() {
original := vcp.DeepCopy()
vcp.Spec.Sync.StorageClasses.Enabled = false
err := k8sClient.Patch(ctx, vcp, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
})
It("will remove the synced storage classes from the virtual cluster", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
})
})

View File

@@ -1,194 +0,0 @@
package k3k_test
import (
"context"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/controller/cluster"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
)
BeforeAll(func() {
ctx = context.Background()
virtualCluster = NewVirtualCluster()
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
storageClassEnabled, err := k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
})
})
It("has disabled the storage classes sync", func() {
Expect(virtualCluster.Cluster.Spec.Sync).To(Not(BeNil()))
Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeFalse())
})
It("doesn't have storage classes", func() {
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(0))
})
It("has some storage classes in the host", func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(hostStorageClasses.Items).To(Not(HaveLen(0)))
})
It("can create storage classes in the virtual cluster", func() {
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
},
Provisioner: "my-provisioner",
}
storageClass, err := virtualCluster.Client.StorageV1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(1))
Expect(virtualStorageClasses.Items[0].Name).To(Equal(storageClass.Name))
})
When("enabling the storage class sync", Ordered, func() {
BeforeAll(func() {
GinkgoWriter.Println("Enabling the storage class sync")
original := virtualCluster.Cluster.DeepCopy()
virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled = true
err := k8sClient.Patch(ctx, virtualCluster.Cluster, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
})
It("will sync host storage classes with the sync enabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
When("editing a synced storage class in the host cluster", Ordered, func() {
var syncedStorageClass *storagev1.StorageClass
BeforeAll(func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
syncedStorageClass = &hostSC
break
}
}
Expect(syncedStorageClass).To(Not(BeNil()))
syncedStorageClass.Labels["foo"] = "bar"
_, err = k8s.StorageV1().StorageClasses().Update(ctx, syncedStorageClass, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("will update the synced storage class in the virtual cluster", func() {
Eventually(func(g Gomega) {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, syncedStorageClass.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(syncedStorageClass.Labels).Should(HaveKeyWithValue("foo", "bar"))
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
})

View File

@@ -0,0 +1,341 @@
package k3k_test
import (
"context"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
var (
virtualCluster *VirtualCluster
translator *translate.ToHostTranslator
)
BeforeAll(func() {
virtualCluster = NewVirtualCluster()
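// the translator resolves the host namespace/name of objects synced from the virtual cluster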
translator = translate.NewHostTranslator(virtualCluster.Cluster)
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
When("creating a Deployment with a PVC", func() {
var (
deployment *appsv1.Deployment
pvc *v1.PersistentVolumeClaim
namespace = "default"
labels = map[string]string{
"app": "k3k-deployment-test-app",
}
)
BeforeAll(func() {
var err error
ctx := context.Background()
By("Creating the PVC")
pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-test-app-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
pvc, err = virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Creating the Deployment")
deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-test-app-",
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: ptr.To[int32](3),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
VolumeMounts: []v1.VolumeMount{{
Name: "data-volume",
MountPath: "/data",
}},
},
},
Volumes: []v1.Volume{{
Name: "data-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}},
},
},
},
}
deployment, err = virtualCluster.Client.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should bound the PVC in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
virtualPVC, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(virtualPVC.Status.Phase).To(Equal(v1.ClaimBound))
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should bound the PVC in the host cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
hostPVCName := translator.NamespacedName(pvc)
hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should have the Pods running in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))
for _, pod := range pods.Items {
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should have the Pods running in the host cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))
for _, pod := range pods.Items {
hostPodName := translator.NamespacedName(&pod)
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
When("creating a StatefulSet with a PVC", func() {
var (
statefulSet *appsv1.StatefulSet
namespace = "default"
labels = map[string]string{
"app": "k3k-sts-test-app",
}
)
BeforeAll(func() {
var err error
ctx := context.Background()
namespace := "default"
By("Creating the StatefulSet")
statefulSet = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-sts-test-app-",
Namespace: namespace,
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To[int32](3),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
VolumeMounts: []v1.VolumeMount{{
Name: "www",
MountPath: "/usr/share/nginx/html",
}},
},
},
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{
Name: "www",
Labels: labels,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}},
},
}
statefulSet, err = virtualCluster.Client.AppsV1().StatefulSets(namespace).Create(ctx, statefulSet, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should bound the PVCs in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
for _, pvc := range pvcs.Items {
g.Expect(pvc.Status.Phase).To(Equal(v1.ClaimBound))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should bound the PVCs in the host cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(statefulSet.Namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
for _, pvc := range pvcs.Items {
hostPVCName := translator.NamespacedName(&pvc)
hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should have the Pods running in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))
for _, pod := range pods.Items {
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
It("should have the Pods running in the host cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))
for _, pod := range pods.Items {
hostPodName := translator.NamespacedName(&pod)
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
})

View File

@@ -20,16 +20,132 @@ import (
)
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
var virtualCluster *VirtualCluster
var (
virtualCluster *VirtualCluster
translator *translate.ToHostTranslator
)
BeforeAll(func() {
virtualCluster = NewVirtualCluster()
translator = translate.NewHostTranslator(virtualCluster.Cluster)
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
When("creating a Pod without any Affinity", func() {
var pod *v1.Pod
BeforeAll(func() {
var err error
ctx := context.Background()
pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should have the default Affinity", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
hostPodName := translator.NamespacedName(pod)
hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))
preferredScheduling := hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
g.Expect(preferredScheduling).To(Not(BeEmpty()))
g.Expect(preferredScheduling[0].Weight).To(Equal(int32(100)))
g.Expect(preferredScheduling[0].Preference.MatchExpressions).To(Not(BeEmpty()))
g.Expect(preferredScheduling[0].Preference.MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})
When("creating a Pod with an Affinity", func() {
var pod *v1.Pod
BeforeAll(func() {
var err error
ctx := context.Background()
pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{
MatchExpressions: []v1.NodeSelectorRequirement{{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpNotIn,
Values: []string{"fake"},
}},
}},
},
},
},
},
}
pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should not have the default Affinity", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
hostPodName := translator.NamespacedName(pod)
hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))
requiredScheduling := hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
g.Expect(requiredScheduling).To(Not(BeNil()))
g.Expect(requiredScheduling.NodeSelectorTerms).To(Not(BeEmpty()))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions).To(Not(BeEmpty()))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Values).To(ContainElement("fake"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})
When("creating a Pod with an invalid configuration", func() {
var virtualPod *v1.Pod
@@ -140,7 +256,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
By("Checking the container status of the Pod in the Host Cluster")
Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
@@ -207,7 +322,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
By("Checking the status of the Pod in the Host Cluster")
Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)
hPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})

View File

@@ -412,13 +412,15 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
Eventually(func(g Gomega) {
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))
return serverPods[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
g.Expect(serverPods).To(HaveLen(1))
g.Expect(serverPods[0].DeletionTimestamp).To(Not(BeNil()))
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(Succeed())
}
func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {